Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"A handful of fixes:

- Fix an MCE corner case bug/crash found via MCE injection testing

- Fix 5-level paging boot crash

- Fix MCE recovery cache invalidation bug

- Fix regression on Xen guests caused by a recent PMD level mremap
speedup optimization"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Make set_pmd_at() paravirt aware
x86/mm/cpa: Fix set_mce_nospec()
x86/boot/compressed/64: Do not corrupt EDX on EFER.LME=1 setting
x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()

+29 -26
+2
arch/x86/boot/compressed/head_64.S
··· 602 602 3: 603 603 /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */ 604 604 pushl %ecx 605 + pushl %edx 605 606 movl $MSR_EFER, %ecx 606 607 rdmsr 607 608 btsl $_EFER_LME, %eax 608 609 wrmsr 610 + popl %edx 609 611 popl %ecx 610 612 611 613 /* Enable PAE and LA57 (if required) paging modes */
+1 -1
arch/x86/include/asm/pgtable.h
··· 1065 1065 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1066 1066 pmd_t *pmdp, pmd_t pmd) 1067 1067 { 1068 - native_set_pmd(pmdp, pmd); 1068 + set_pmd(pmdp, pmd); 1069 1069 } 1070 1070 1071 1071 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+1
arch/x86/kernel/cpu/mce/core.c
··· 784 784 quirk_no_way_out(i, m, regs); 785 785 786 786 if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 787 + m->bank = i; 787 788 mce_read_aux(m, i); 788 789 *msg = tmp; 789 790 return 1;
+25 -25
arch/x86/mm/pageattr.c
··· 230 230 231 231 #endif 232 232 233 + /* 234 + * See set_mce_nospec(). 235 + * 236 + * Machine check recovery code needs to change cache mode of poisoned pages to 237 + * UC to avoid speculative access logging another error. But passing the 238 + * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a 239 + * speculative access. So we cheat and flip the top bit of the address. This 240 + * works fine for the code that updates the page tables. But at the end of the 241 + * process we need to flush the TLB and cache and the non-canonical address 242 + * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. 243 + * 244 + * But in the common case we already have a canonical address. This code 245 + * will fix the top bit if needed and is a no-op otherwise. 246 + */ 247 + static inline unsigned long fix_addr(unsigned long addr) 248 + { 249 + #ifdef CONFIG_X86_64 250 + return (long)(addr << 1) >> 1; 251 + #else 252 + return addr; 253 + #endif 254 + } 255 + 233 256 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) 234 257 { 235 258 if (cpa->flags & CPA_PAGES_ARRAY) { ··· 336 313 unsigned int i; 337 314 338 315 for (i = 0; i < cpa->numpages; i++) 339 - __flush_tlb_one_kernel(__cpa_addr(cpa, i)); 316 + __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); 340 317 } 341 318 342 319 static void cpa_flush(struct cpa_data *data, int cache) ··· 370 347 * Only flush present addresses: 371 348 */ 372 349 if (pte && (pte_val(*pte) & _PAGE_PRESENT)) 373 - clflush_cache_range_opt((void *)addr, PAGE_SIZE); 350 + clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); 374 351 } 375 352 mb(); 376 353 } ··· 1649 1626 cpa->numpages = numpages; 1650 1627 return ret; 1651 1628 } 1652 - 1653 - /* 1654 - * Machine check recovery code needs to change cache mode of poisoned 1655 - * pages to UC to avoid speculative access logging another error. But 1656 - * passing the address of the 1:1 mapping to set_memory_uc() is a fine 1657 - * way to encourage a speculative access. So we cheat and flip the top 1658 - * bit of the address. This works fine for the code that updates the 1659 - * page tables. But at the end of the process we need to flush the cache 1660 - * and the non-canonical address causes a #GP fault when used by the 1661 - * CLFLUSH instruction. 1662 - * 1663 - * But in the common case we already have a canonical address. This code 1664 - * will fix the top bit if needed and is a no-op otherwise. 1665 - */ 1666 - static inline unsigned long make_addr_canonical_again(unsigned long addr) 1667 - { 1668 - #ifdef CONFIG_X86_64 1669 - return (long)(addr << 1) >> 1; 1670 - #else 1671 - return addr; 1672 - #endif 1673 - } 1674 - 1675 1629 1676 1630 static int change_page_attr_set_clr(unsigned long *addr, int numpages, 1677 1631 pgprot_t mask_set, pgprot_t mask_clr,