Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.2_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Make sure the poking PGD is pinned for Xen PV as it requires it this
way

- Fixes for two resctrl races when moving a task or creating a new
monitoring group

- Fix SEV-SNP guests running under HyperV where MTRRs are disabled to
not return a UC- type mapping type on memremap() and thus cause a
serious slowdown

- Fix insn mnemonics in bioscall.S now that binutils is starting to fix
confusing insn suffixes

* tag 'x86_urgent_for_v6.2_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: fix poking_init() for Xen PV guests
x86/resctrl: Fix event counts regression in reused RMIDs
x86/resctrl: Fix task CLOSID/RMID update race
x86/pat: Fix pat_x_mtrr_type() for MTRR disabled case
x86/boot: Avoid using Intel mnemonics in AT&T syntax asm

+52 -20
+2 -2
arch/x86/boot/bioscall.S
···
 32  32 	movw	%dx, %si
 33  33 	movw	%sp, %di
 34  34 	movw	$11, %cx
 35      -	rep; movsd
     35  +	rep; movsl
 36  36
 37  37 	/* Pop full state from the stack */
 38  38 	popal
···
 67  67 	jz	4f
 68  68 	movw	%sp, %si
 69  69 	movw	$11, %cx
 70      -	rep; movsd
     70  +	rep; movsl
 71  71 4:	addw	$44, %sp
 72  72
 73  73 	/* Restore state and return */
+33 -16
arch/x86/kernel/cpu/resctrl/monitor.c
···
146 146 	return entry;
147 147 }
148 148
    149 + static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
    150 + {
    151 + 	u64 msr_val;
    152 +
    153 + 	/*
    154 + 	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
    155 + 	 * with a valid event code for supported resource type and the bits
    156 + 	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
    157 + 	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
    158 + 	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
    159 + 	 * are error bits.
    160 + 	 */
    161 + 	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
    162 + 	rdmsrl(MSR_IA32_QM_CTR, msr_val);
    163 +
    164 + 	if (msr_val & RMID_VAL_ERROR)
    165 + 		return -EIO;
    166 + 	if (msr_val & RMID_VAL_UNAVAIL)
    167 + 		return -EINVAL;
    168 +
    169 + 	*val = msr_val;
    170 + 	return 0;
    171 + }
    172 +
149 173 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
150 174 						 u32 rmid,
151 175 						 enum resctrl_event_id eventid)
···
196 172 	struct arch_mbm_state *am;
197 173
198 174 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
199     - 	if (am)
    175 + 	if (am) {
200 176 		memset(am, 0, sizeof(*am));
    177 +
    178 + 		/* Record any initial, non-zero count value. */
    179 + 		__rmid_read(rmid, eventid, &am->prev_msr);
    180 + 	}
201 181 }
202 182
203 183 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
···
219 191 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
220 192 	struct arch_mbm_state *am;
221 193 	u64 msr_val, chunks;
    194 + 	int ret;
222 195
223 196 	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
224 197 		return -EINVAL;
225 198
226     - 	/*
227     - 	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
228     - 	 * with a valid event code for supported resource type and the bits
229     - 	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
230     - 	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
231     - 	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
232     - 	 * are error bits.
233     - 	 */
234     - 	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
235     - 	rdmsrl(MSR_IA32_QM_CTR, msr_val);
236     -
237     - 	if (msr_val & RMID_VAL_ERROR)
238     - 		return -EIO;
239     - 	if (msr_val & RMID_VAL_UNAVAIL)
240     - 		return -EINVAL;
    199 + 	ret = __rmid_read(rmid, eventid, &msr_val);
    200 + 	if (ret)
    201 + 		return ret;
241 202
242 203 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
243 204 	if (am) {
+11 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
···
 580  580 	/*
 581  581 	 * Ensure the task's closid and rmid are written before determining if
 582  582 	 * the task is current that will decide if it will be interrupted.
      583 +	 * This pairs with the full barrier between the rq->curr update and
      584 +	 * resctrl_sched_in() during context switch.
 583  585 	 */
 584      -	barrier();
      586 +	smp_mb();
 585  587
 586  588 	/*
 587  589 	 * By now, the task's closid and rmid are set. If the task is current
···
2402 2400 	    is_rmid_match(t, from)) {
2403 2401 		WRITE_ONCE(t->closid, to->closid);
2404 2402 		WRITE_ONCE(t->rmid, to->mon.rmid);
     2403 +
     2404 +		/*
     2405 +		 * Order the closid/rmid stores above before the loads
     2406 +		 * in task_curr(). This pairs with the full barrier
     2407 +		 * between the rq->curr update and resctrl_sched_in()
     2408 +		 * during context switch.
     2409 +		 */
     2410 +		smp_mb();
2405 2411
2406 2412 		/*
2407 2413 		 * If the task is on a CPU, set the CPU in the mask.
+4
arch/x86/mm/init.c
···
 26  26 #include <asm/pti.h>
 27  27 #include <asm/text-patching.h>
 28  28 #include <asm/memtype.h>
     29 + #include <asm/paravirt.h>
 29  30
 30  31 /*
 31  32  * We need to define the tracepoints somewhere, and tlb.c
···
804 803
805 804 	poking_mm = mm_alloc();
806 805 	BUG_ON(!poking_mm);
    806 +
    807 + 	/* Xen PV guests need the PGD to be pinned. */
    808 + 	paravirt_arch_dup_mmap(NULL, poking_mm);
807 809
808 810 	/*
809 811 	 * Randomize the poking address, but make sure that the following page
+2 -1
arch/x86/mm/pat/memtype.c
···
387 387 	u8 mtrr_type, uniform;
388 388
389 389 	mtrr_type = mtrr_type_lookup(start, end, &uniform);
390     - 	if (mtrr_type != MTRR_TYPE_WRBACK)
    390 + 	if (mtrr_type != MTRR_TYPE_WRBACK &&
    391 + 	    mtrr_type != MTRR_TYPE_INVALID)
391 392 		return _PAGE_CACHE_MODE_UC_MINUS;
392 393
393 394 	return _PAGE_CACHE_MODE_WB;