Merge tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Reset the why-the-system-rebooted register on AMD to avoid stale bits
   remaining from previous boots

 - Add a missing barrier in the TLB flushing code to prevent a TLB
   generation from erroneously going unflushed

 - Make sure cpa_flush() does not overshoot when computing the end range
   of a flush region

 - Fix resctrl bandwidth counting on AMD systems when the number of
   monitoring groups created exceeds what the hardware can track

* tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Prevent reset reasons from being retained across reboot
  x86/mm: Fix SMP ordering in switch_mm_irqs_off()
  x86/mm: Fix overflow in __cpa_addr()
  x86/resctrl: Fix miscount of bandwidth event when reactivating previously unavailable RMID
Author: Linus Torvalds
Date:   2025-10-19 04:41:27 -10:00

4 changed files with 47 additions and 9 deletions

@@ -1355,11 +1355,23 @@ static __init int print_s5_reset_status_mmio(void)
 		return 0;
 
 	value = ioread32(addr);
-	iounmap(addr);
 
 	/* Value with "all bits set" is an error response and should be ignored. */
-	if (value == U32_MAX)
+	if (value == U32_MAX) {
+		iounmap(addr);
 		return 0;
+	}
+
+	/*
+	 * Clear all reason bits so they won't be retained if the next reset
+	 * does not update the register. Besides, some bits are never cleared by
+	 * hardware so it's software's responsibility to clear them.
+	 *
+	 * Writing the value back effectively clears all reason bits as they are
+	 * write-1-to-clear.
+	 */
+	iowrite32(value, addr);
+	iounmap(addr);
 
 	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
 		if (!(value & BIT(i)))

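For readers unfamiliar with the idiom in the comment above: the register is write-1-to-clear (W1C), so writing back the value that was just read clears exactly the bits that were set. A minimal illustrative sketch, where a plain volatile pointer stands in for the kernel's ioread32()/iowrite32() MMIO accessors and the function name is hypothetical:

#include <stdint.h>

/*
 * Read a W1C status register and clear what was read: writing a 1 to
 * a bit position clears that bit, writing 0 leaves it alone, so
 * writing back the raw value clears precisely the latched bits.
 */
static inline uint32_t read_and_clear_w1c(volatile uint32_t *reg)
{
	uint32_t value = *reg;	/* latch the current reason bits */

	*reg = value;		/* clear every bit that read as 1 */
	return value;
}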

@@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
 			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
 			   u64 *val, void *ignored)
 {
+	struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
 	int cpu = cpumask_any(&d->hdr.cpu_mask);
+	struct arch_mbm_state *am;
 	u64 msr_val;
 	u32 prmid;
 	int ret;
@@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
 
 	prmid = logical_rmid_to_physical_rmid(cpu, rmid);
 	ret = __rmid_read_phys(prmid, eventid, &msr_val);
-	if (ret)
-		return ret;
 
-	*val = get_corrected_val(r, d, rmid, eventid, msr_val);
+	if (!ret) {
+		*val = get_corrected_val(r, d, rmid, eventid, msr_val);
+	} else if (ret == -EINVAL) {
+		am = get_arch_mbm_state(hw_dom, rmid, eventid);
+		if (am)
+			am->prev_msr = 0;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int __cntr_id_read(u32 cntr_id, u64 *val)

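Background for the change above: MBM bandwidth counts are accumulated as deltas between successive reads of a free-running hardware counter, against a per-RMID software baseline (prev_msr). A simplified, hypothetical model of that accounting, not the kernel's actual helpers:

#include <stdint.h>

struct mbm_state {
	uint64_t prev_msr;	/* raw counter value at the last read */
	uint64_t chunks;	/* accumulated bandwidth count */
};

/* Fold a fresh raw counter read into the running total. */
static void mbm_accumulate(struct mbm_state *s, uint64_t msr_val)
{
	s->chunks += msr_val - s->prev_msr;	/* delta since last read */
	s->prev_msr = msr_val;
}

When the hardware stops tracking an RMID and later reactivates it, its counter restarts from zero; if prev_msr still holds the old snapshot, the first delta after reactivation is garbage. Zeroing prev_msr on -EINVAL, as the patch does, re-baselines the state so the next successful read counts from zero.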

@@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
 	}
 
 	start = fix_addr(__cpa_addr(cpa, 0));
-	end = fix_addr(__cpa_addr(cpa, cpa->numpages));
+	end = start + cpa->numpages * PAGE_SIZE;
 
 	if (cpa->force_flush_all)
 		end = TLB_FLUSH_ALL;

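A note on the shape of this fix: the old code derived the end of the flush range by asking __cpa_addr() for the address of entry cpa->numpages, one past the last valid index, which can overshoot when the addresses come from an array. Computing the end as start plus the region length never touches an out-of-range element. An illustrative sketch with hypothetical types (the real __cpa_addr() handles several addressing modes):

#define PAGE_SIZE	4096UL

/* One virtual address per page, as in the array-based CPA modes. */
struct flush_region {
	unsigned long *vaddr;
	unsigned long numpages;
};

static unsigned long region_end(const struct flush_region *r)
{
	/*
	 * Overshooting variant: r->vaddr[r->numpages] reads one element
	 * past the array, the analogue of __cpa_addr(cpa, cpa->numpages).
	 *
	 * Safe variant: derive the end from the start arithmetically,
	 * without indexing into the array at all.
	 */
	return r->vaddr[0] + r->numpages * PAGE_SIZE;
}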

@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
 		 */
 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
 		barrier();
 
-		/* Start receiving IPIs and then read tlb_gen (and LAM below) */
+		/*
+		 * Make sure this CPU is set in mm_cpumask() such that we'll
+		 * receive invalidation IPIs.
+		 *
+		 * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
+		 * operation, or explicitly provide one. Such that:
+		 *
+		 *	switch_mm_irqs_off()			flush_tlb_mm_range()
+		 *	  smp_store_release(loaded_mm, SWITCHING);	atomic64_inc_return(tlb_gen)
+		 *	  smp_mb();		// here		// smp_mb() implied
+		 *	  atomic64_read(tlb_gen);		this_cpu_read(loaded_mm);
+		 *
+		 * we properly order against flush_tlb_mm_range(), where the
+		 * loaded_mm load can happen in native_flush_tlb_multi() ->
+		 * should_flush_tlb().
+		 *
+		 * This way switch_mm() must see the new tlb_gen or
+		 * flush_tlb_mm_range() must see the new loaded_mm, or both.
+		 */
 		if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
+		else
+			smp_mb();
 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
 		ns = choose_new_asid(next, next_tlb_gen);
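
The new comment describes a classic store-buffering pattern: each side stores to one location and then loads the other, and without a full barrier on both sides between its store and its load, both loads may observe stale values, so neither side notices the other and a TLB generation goes unflushed. A standalone C11 sketch of the same shape (illustrative, not kernel code: the globals stand in for cpu_tlbstate.loaded_mm and mm->context.tlb_gen, and seq_cst atomics stand in for the kernel's primitives):

#include <stdatomic.h>

atomic_long loaded_mm;	/* stands in for cpu_tlbstate.loaded_mm */
atomic_long tlb_gen;	/* stands in for mm->context.tlb_gen */

/* switch_mm_irqs_off() side: publish loaded_mm, then read tlb_gen. */
long cpu0_switch(void)
{
	atomic_store(&loaded_mm, 1);	/* LOADED_MM_SWITCHING */
	/* The barrier the patch supplies in the else branch;
	 * cpumask_set_cpu()'s atomic RMW implies one otherwise. */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(&tlb_gen);
}

/* flush_tlb_mm_range() side: bump tlb_gen, then read loaded_mm. */
long cpu1_flush(void)
{
	/* seq_cst RMW: fully ordered, like atomic64_inc_return(). */
	atomic_fetch_add(&tlb_gen, 1);
	return atomic_load(&loaded_mm);
}

With both barriers in place, the outcome where both loads return the old values is forbidden: either the switch reads the new tlb_gen, or the flusher reads LOADED_MM_SWITCHING, or both, which is exactly the property the new comment states.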