Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux


Merge tag 'x86_urgent_for_v6.16_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Make sure the array tracking which kernel text positions need to be
alternatives-patched doesn't get mishandled by out-of-order
modifications, leading to it overflowing and causing page faults when
patching

- Avoid an infinite loop when early code does a ranged TLB invalidation
before the count of how many pages a broadcast TLB invalidation can
flush has been read from CPUID

- Fix a CONFIG_MODULES typo

- Disable broadcast TLB invalidation when PTI is enabled to avoid an
overflow of the bitmap tracking dynamic ASIDs which need to be
flushed when the kernel switches between the user and kernel address
space

- Handle the case of a CPU going offline and thus reporting zeroes when
reading top-level events in the resctrl code

* tag 'x86_urgent_for_v6.16_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/alternatives: Fix int3 handling failure from broken text_poke array
x86/mm: Fix early boot use of INVPLGB
x86/its: Fix an ifdef typo in its_alloc()
x86/mm: Disable INVLPGB when PTI is enabled
x86,fs/resctrl: Remove inappropriate references to cacheinfo in the resctrl subsystem

9 files changed: +32 -18
+2 -2
arch/x86/kernel/alternative.c
···
        struct its_array *pages = &its_pages;
        void *page;

-#ifdef CONFIG_MODULE
+#ifdef CONFIG_MODULES
        if (its_mod)
                pages = &its_mod->arch.its_pages;
 #endif
···
  */
 void __ref smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate)
 {
-       __smp_text_poke_batch_add(addr, opcode, len, emulate);
+       smp_text_poke_batch_add(addr, opcode, len, emulate);
        smp_text_poke_batch_finish();
 }
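The second hunk routes smp_text_poke_single() through the checked batch-add helper instead of the raw append, which is the fix described in the first bullet of the merge message. The following is a toy userspace model, not kernel code, and every name in it is hypothetical: it only illustrates why unconditionally appending into a fixed-size pending array can overflow when pokes arrive out of address order, while a checked add that flushes first stays bounded.

#include <stdio.h>
#include <stdint.h>

#define BATCH_MAX 8

static uintptr_t pending[BATCH_MAX];
static unsigned int nr_pending;

/* Model of the real flush: "apply" and drop every pending poke. */
static void batch_finish(void)
{
        printf("flushing %u pending pokes\n", nr_pending);
        nr_pending = 0;
}

/* Unchecked append: writes past pending[] once the batch is full. */
static void batch_add_unchecked(uintptr_t addr)
{
        pending[nr_pending++] = addr;
}

/* Checked append: flush first when the batch is full or addr breaks ordering. */
static void batch_add_checked(uintptr_t addr)
{
        if (nr_pending == BATCH_MAX ||
            (nr_pending && addr <= pending[nr_pending - 1]))
                batch_finish();
        batch_add_unchecked(addr);
}

int main(void)
{
        /*
         * Thirty-two single pokes arriving in descending address order:
         * the checked path flushes before each out-of-order add and never
         * runs past the end of pending[].
         */
        for (uintptr_t addr = 0x1020; addr >= 0x1001; addr--)
                batch_add_checked(addr);
        batch_finish();
        return 0;
}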
+1 -1
arch/x86/kernel/cpu/amd.c
···

 #include "cpu.h"

-u16 invlpgb_count_max __ro_after_init;
+u16 invlpgb_count_max __ro_after_init = 1;

 static inline int rdmsrq_amd_safe(unsigned msr, u64 *p)
 {
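invlpgb_count_max is the number of pages a single broadcast invalidation may cover; before the fix it stayed 0 until CPUID was read, so an early ranged flush could loop forever. Here is a toy userspace model, not kernel code, of why a step of zero pages never terminates and why initializing the count to 1 guarantees forward progress until the real value arrives.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Before the fix this started at 0 until CPUID filled it in. */
static unsigned long invlpgb_count_max = 1;

/* Model of a ranged flush that advances by count_max pages per step. */
static void flush_range(unsigned long start, unsigned long end)
{
        for (unsigned long addr = start; addr < end;
             addr += invlpgb_count_max * PAGE_SIZE)
                ;       /* one broadcast invalidation covering count_max pages */
}

int main(void)
{
        /* With invlpgb_count_max == 0 this loop would never advance. */
        flush_range(0, 16 * PAGE_SIZE);
        printf("range flushed\n");
        return 0;
}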
+4 -2
arch/x86/kernel/cpu/resctrl/core.c
···
        struct rdt_hw_mon_domain *hw_dom;
        struct rdt_domain_hdr *hdr;
        struct rdt_mon_domain *d;
+       struct cacheinfo *ci;
        int err;

        lockdep_assert_held(&domain_list_lock);
···
        d = &hw_dom->d_resctrl;
        d->hdr.id = id;
        d->hdr.type = RESCTRL_MON_DOMAIN;
-       d->ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
-       if (!d->ci) {
+       ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
+       if (!ci) {
                pr_warn_once("Can't find L3 cache for CPU:%d resource %s\n", cpu, r->name);
                mon_domain_free(hw_dom);
                return;
        }
+       d->ci_id = ci->id;
        cpumask_set_cpu(cpu, &d->hdr.cpu_mask);

        arch_mon_domain_online(r, d);
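The domain no longer caches a struct cacheinfo pointer that belongs to one specific CPU; it records only the numeric L3 cache id and resolves cacheinfo from an online CPU whenever it is needed, matching the last bullet of the merge message where an offlined CPU left readers looking at zeroed data. A toy userspace model of the id-instead-of-pointer approach follows; it is not kernel code and its structures are made up for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for the kernel's per-CPU L3 cacheinfo. */
struct cacheinfo {
        unsigned int id;
        bool online;    /* cleared when the owning CPU goes offline */
};

static struct cacheinfo l3_of_cpu[2] = {
        { .id = 3, .online = true },    /* CPU 0's view of L3 cache 3 */
        { .id = 3, .online = true },    /* CPU 1's view of the same cache */
};

/* Fixed domain: remember the id, not a pointer into one CPU's table. */
struct mon_domain {
        unsigned int ci_id;
};

/* Re-resolve cacheinfo from whichever CPU is still online. */
static struct cacheinfo *lookup_l3(unsigned int ci_id)
{
        for (int cpu = 0; cpu < 2; cpu++)
                if (l3_of_cpu[cpu].online && l3_of_cpu[cpu].id == ci_id)
                        return &l3_of_cpu[cpu];
        return NULL;
}

int main(void)
{
        struct mon_domain d = { .ci_id = 3 };

        l3_of_cpu[0].online = false;    /* CPU 0 goes offline */

        struct cacheinfo *ci = lookup_l3(d.ci_id);
        if (ci)
                printf("cache id %u still reachable via an online CPU\n", ci->id);
        return 0;
}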
+5
arch/x86/mm/pti.c
···
                return;

        setup_force_cpu_cap(X86_FEATURE_PTI);
+
+       if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+               pr_debug("PTI enabled, disabling INVLPGB\n");
+               setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
+       }
 }

 static int __init pti_parse_cmdline(char *arg)
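Per the merge message, the bitmap tracking which dynamic ASIDs need flushing is not sized for the extra user-space ASID variants that PTI introduces, so setting a user variant's bit would run past the end of the bitmap; rather than resizing it, the fix simply clears X86_FEATURE_INVLPGB whenever PTI is forced on. The sketch below is a toy userspace model, not kernel code, with made-up sizes and a made-up user-ASID encoding, showing the overflow that is being avoided.

#include <stdio.h>
#include <stdbool.h>

#define NR_DYN_ASIDS    6                       /* slots for kernel ASIDs only */
#define USER_ASID(a)    ((a) + NR_DYN_ASIDS)    /* hypothetical user variant */

static bool need_flush[NR_DYN_ASIDS];
static bool pti_enabled = true;

/* Returns -1 instead of writing out of bounds, to make the bug visible. */
static int mark_needs_flush(unsigned int asid)
{
        if (asid >= NR_DYN_ASIDS)
                return -1;
        need_flush[asid] = true;
        return 0;
}

int main(void)
{
        unsigned int asid = 2;

        mark_needs_flush(asid);                 /* kernel variant fits */
        if (pti_enabled && mark_needs_flush(USER_ASID(asid)) < 0)
                printf("user ASID variant overflows the bitmap: disable INVLPGB under PTI\n");
        return 0;
}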
+9 -4
fs/resctrl/ctrlmondata.c
···
        struct rmid_read rr = {0};
        struct rdt_mon_domain *d;
        struct rdtgroup *rdtgrp;
+       int domid, cpu, ret = 0;
        struct rdt_resource *r;
+       struct cacheinfo *ci;
        struct mon_data *md;
-       int domid, ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
···
         * one that matches this cache id.
         */
        list_for_each_entry(d, &r->mon_domains, hdr.list) {
-               if (d->ci->id == domid) {
-                       rr.ci = d->ci;
+               if (d->ci_id == domid) {
+                       rr.ci_id = d->ci_id;
+                       cpu = cpumask_any(&d->hdr.cpu_mask);
+                       ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
+                       if (!ci)
+                               continue;
                        mon_event_read(&rr, r, NULL, rdtgrp,
-                                      &d->ci->shared_cpu_map, evtid, false);
+                                      &ci->shared_cpu_map, evtid, false);
                        goto checkresult;
                }
        }
+2 -2
fs/resctrl/internal.h
···
  *        domains in @r sharing L3 @ci.id
  * @evtid: Which monitor event to read.
  * @first: Initialize MBM counter when true.
- * @ci: Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
+ * @ci_id: Cacheinfo id for L3. Only set when @d is NULL. Used when summing domains.
  * @err: Error encountered when reading counter.
  * @val: Returned value of event counter. If @rgrp is a parent resource group,
  *       @val includes the sum of event counts from its child resource groups.
···
        struct rdt_mon_domain   *d;
        enum resctrl_event_id   evtid;
        bool                    first;
-       struct cacheinfo        *ci;
+       unsigned int            ci_id;
        int                     err;
        u64                     val;
        void                    *arch_mon_ctx;
+4 -2
fs/resctrl/monitor.c
···
 {
        int cpu = smp_processor_id();
        struct rdt_mon_domain *d;
+       struct cacheinfo *ci;
        struct mbm_state *m;
        int err, ret;
        u64 tval = 0;
···
        }

        /* Summing domains that share a cache, must be on a CPU for that cache. */
-       if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
+       ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
+       if (!ci || ci->id != rr->ci_id)
                return -EINVAL;

        /*
···
         */
        ret = -EINVAL;
        list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
-               if (d->ci->id != rr->ci->id)
+               if (d->ci_id != rr->ci_id)
                        continue;
                err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
                                             rr->evtid, &tval, rr->arch_mon_ctx);
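With only the cache id stored in struct rmid_read, the summing path now derives the executing CPU's own L3 id and compares it against the requested one, instead of testing membership in a shared_cpu_map reached through the removed pointer. Below is a toy userspace model of that check, not kernel code; the per-CPU id table and all names are made up.

#include <stdio.h>
#include <errno.h>

struct rmid_read {
        unsigned int ci_id;     /* L3 cache id the caller wants summed */
};

/* Hypothetical table mapping each CPU to the id of its L3 cache. */
static const unsigned int cpu_l3_id[4] = { 0, 0, 1, 1 };

static int sum_domains(const struct rmid_read *rr, int this_cpu)
{
        /* Summing must run on a CPU that sits under the requested L3. */
        if (cpu_l3_id[this_cpu] != rr->ci_id)
                return -EINVAL;
        /* ... sum the event counts of every domain with a matching ci_id ... */
        return 0;
}

int main(void)
{
        struct rmid_read rr = { .ci_id = 1 };

        printf("on CPU 0: %d\n", sum_domains(&rr, 0));  /* -EINVAL: wrong cache */
        printf("on CPU 2: %d\n", sum_domains(&rr, 2));  /* 0: allowed to sum */
        return 0;
}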
+3 -3
fs/resctrl/rdtgroup.c
···
        char name[32];

        snc_mode = r->mon_scope == RESCTRL_L3_NODE;
-       sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
+       sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id);
        if (snc_mode)
                sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id);

···
                return -EPERM;

        list_for_each_entry(mevt, &r->evt_list, list) {
-               domid = do_sum ? d->ci->id : d->hdr.id;
+               domid = do_sum ? d->ci_id : d->hdr.id;
                priv = mon_get_kn_priv(r->rid, domid, mevt, do_sum);
                if (WARN_ON_ONCE(!priv))
                        return -EINVAL;
···
        lockdep_assert_held(&rdtgroup_mutex);

        snc_mode = r->mon_scope == RESCTRL_L3_NODE;
-       sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id);
+       sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id);
        kn = kernfs_find_and_get(parent_kn, name);
        if (kn) {
                /*
+2 -2
include/linux/resctrl.h
···
 /**
  * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
  * @hdr:               common header for different domain types
- * @ci:                cache info for this domain
+ * @ci_id:             cache info id for this domain
  * @rmid_busy_llc:     bitmap of which limbo RMIDs are above threshold
  * @mbm_total:         saved state for MBM total bandwidth
  * @mbm_local:         saved state for MBM local bandwidth
···
  */
 struct rdt_mon_domain {
        struct rdt_domain_hdr           hdr;
-       struct cacheinfo                *ci;
+       unsigned int                    ci_id;
        unsigned long                   *rmid_busy_llc;
        struct mbm_state                *mbm_total;
        struct mbm_state                *mbm_local;