Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A set of fixes for x86:

- Prevent multiplication result truncation on 32bit. Introduced with
the early timestamp rework.

- Ensure microcode revision storage to be consistent under all
circumstances

- Prevent write tearing of PTEs

- Prevent confusion of user and kernel registers when dumping fatal
signals verbosely

- Make an error return value in a failure path of the vector
allocation negative. Returning EINVAL might make the caller assume
success and cause further wreckage.

- A trivial kernel doc warning fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Use WRITE_ONCE() when setting PTEs
x86/apic/vector: Make error return value negative
x86/process: Don't mix user/kernel regs in 64bit __show_regs()
x86/tsc: Prevent result truncation on 32bit
x86: Fix kernel-doc atomic.h warnings
x86/microcode: Update the new microcode revision unconditionally
x86/microcode: Make sure boot_cpu_data.microcode is up-to-date

+89 -61
+6 -6
arch/x86/include/asm/atomic.h
···
   * true if the result is zero, or false for all
   * other cases.
   */
- #define arch_atomic_sub_and_test arch_atomic_sub_and_test
  static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  {
  	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
  }
+ #define arch_atomic_sub_and_test arch_atomic_sub_and_test

  /**
   * arch_atomic_inc - increment atomic variable
···
   *
   * Atomically increments @v by 1.
   */
- #define arch_atomic_inc arch_atomic_inc
  static __always_inline void arch_atomic_inc(atomic_t *v)
  {
  	asm volatile(LOCK_PREFIX "incl %0"
  		     : "+m" (v->counter));
  }
+ #define arch_atomic_inc arch_atomic_inc

  /**
   * arch_atomic_dec - decrement atomic variable
···
   *
   * Atomically decrements @v by 1.
   */
- #define arch_atomic_dec arch_atomic_dec
  static __always_inline void arch_atomic_dec(atomic_t *v)
  {
  	asm volatile(LOCK_PREFIX "decl %0"
  		     : "+m" (v->counter));
  }
+ #define arch_atomic_dec arch_atomic_dec

  /**
   * arch_atomic_dec_and_test - decrement and test
···
   * returns true if the result is 0, or false for all other
   * cases.
   */
- #define arch_atomic_dec_and_test arch_atomic_dec_and_test
  static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  {
  	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
  }
+ #define arch_atomic_dec_and_test arch_atomic_dec_and_test

  /**
   * arch_atomic_inc_and_test - increment and test
···
   * and returns true if the result is zero, or false for all
   * other cases.
   */
- #define arch_atomic_inc_and_test arch_atomic_inc_and_test
  static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  {
  	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
  }
+ #define arch_atomic_inc_and_test arch_atomic_inc_and_test

  /**
   * arch_atomic_add_negative - add and test if negative
···
   * if the result is negative, or false when
   * result is greater than or equal to zero.
   */
- #define arch_atomic_add_negative arch_atomic_add_negative
  static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
  {
  	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
  }
+ #define arch_atomic_add_negative arch_atomic_add_negative

  /**
   * arch_atomic_add_return - add integer and return
+4 -4
arch/x86/include/asm/atomic64_32.h
···
   *
   * Atomically increments @v by 1.
   */
- #define arch_atomic64_inc arch_atomic64_inc
  static inline void arch_atomic64_inc(atomic64_t *v)
  {
  	__alternative_atomic64(inc, inc_return, /* no output */,
  			       "S" (v) : "memory", "eax", "ecx", "edx");
  }
+ #define arch_atomic64_inc arch_atomic64_inc

  /**
   * arch_atomic64_dec - decrement atomic64 variable
···
   *
   * Atomically decrements @v by 1.
   */
- #define arch_atomic64_dec arch_atomic64_dec
  static inline void arch_atomic64_dec(atomic64_t *v)
  {
  	__alternative_atomic64(dec, dec_return, /* no output */,
  			       "S" (v) : "memory", "eax", "ecx", "edx");
  }
+ #define arch_atomic64_dec arch_atomic64_dec

  /**
   * arch_atomic64_add_unless - add unless the number is a given value
···
  	return (int)a;
  }

- #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
  static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
  {
  	int r;
···
  		     "S" (v) : "ecx", "edx", "memory");
  	return r;
  }
+ #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

- #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
  static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
  {
  	long long r;
···
  		     "S" (v) : "ecx", "memory");
  	return r;
  }
+ #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

  #undef alternative_atomic64
  #undef __alternative_atomic64
+6 -6
arch/x86/include/asm/atomic64_64.h
···
   * true if the result is zero, or false for all
   * other cases.
   */
- #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
  static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  {
  	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
  }
+ #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

  /**
   * arch_atomic64_inc - increment atomic64 variable
···
   *
   * Atomically increments @v by 1.
   */
- #define arch_atomic64_inc arch_atomic64_inc
  static __always_inline void arch_atomic64_inc(atomic64_t *v)
  {
  	asm volatile(LOCK_PREFIX "incq %0"
  		     : "=m" (v->counter)
  		     : "m" (v->counter));
  }
+ #define arch_atomic64_inc arch_atomic64_inc

  /**
   * arch_atomic64_dec - decrement atomic64 variable
···
   *
   * Atomically decrements @v by 1.
   */
- #define arch_atomic64_dec arch_atomic64_dec
  static __always_inline void arch_atomic64_dec(atomic64_t *v)
  {
  	asm volatile(LOCK_PREFIX "decq %0"
  		     : "=m" (v->counter)
  		     : "m" (v->counter));
  }
+ #define arch_atomic64_dec arch_atomic64_dec

  /**
   * arch_atomic64_dec_and_test - decrement and test
···
   * returns true if the result is 0, or false for all other
   * cases.
   */
- #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
  static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  {
  	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
  }
+ #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

  /**
   * arch_atomic64_inc_and_test - increment and test
···
   * and returns true if the result is zero, or false for all
   * other cases.
   */
- #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
  static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  {
  	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
  }
+ #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

  /**
   * arch_atomic64_add_negative - add and test if negative
···
   * if the result is negative, or false when
   * result is greater than or equal to zero.
   */
- #define arch_atomic64_add_negative arch_atomic64_add_negative
  static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
  {
  	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
  }
+ #define arch_atomic64_add_negative arch_atomic64_add_negative

  /**
   * arch_atomic64_add_return - add and return
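All three atomic headers get the same mechanical change: the self-referential markers like #define arch_atomic_inc arch_atomic_inc, which tell the generic atomic code that the architecture supplies its own implementation, move from before each function to after it. kernel-doc expects the item it documents to sit immediately below its /** ... */ comment, and a macro in between is what triggered the warnings this pull fixes. A self-contained sketch of the idiom (in the kernel these pieces live in arch/x86/include/asm/atomic.h and the generic atomic headers; the stand-in typedefs here are only for illustration):

typedef struct { int counter; } atomic_t;	/* toy stand-in */
#define LOCK_PREFIX "lock; "
#define __always_inline inline __attribute__((always_inline))

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
}
/* The marker now sits *after* the definition, so kernel-doc still finds
 * the function directly beneath its comment: */
#define arch_atomic_inc arch_atomic_inc

/* Generic-layer sketch: a fallback is only generated when the arch did
 * not announce its own implementation via the macro above. */
#ifndef arch_atomic_inc
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	arch_atomic_add(1, v);
}
#endif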
+11 -1
arch/x86/include/asm/kdebug.h
···
  	DIE_NMIUNKNOWN,
  };

+ enum show_regs_mode {
+ 	SHOW_REGS_SHORT,
+ 	/*
+ 	 * For when userspace crashed, but we don't think it's our fault, and
+ 	 * therefore don't print kernel registers.
+ 	 */
+ 	SHOW_REGS_USER,
+ 	SHOW_REGS_ALL
+ };
+
  extern void die(const char *, struct pt_regs *,long);
  extern int __must_check __die(const char *, struct pt_regs *, long);
  extern void show_stack_regs(struct pt_regs *regs);
- extern void __show_regs(struct pt_regs *regs, int all);
+ extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
  extern void show_iret_regs(struct pt_regs *regs);
  extern unsigned long oops_begin(void);
  extern void oops_end(unsigned long, struct pt_regs *, int signr);
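The three-way mode replaces the old int all flag so callers can state what they mean. This is how a call site picks a mode, mirroring the show_regs() change in arch/x86/kernel/dumpstack.c further down:

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	/* A userspace crash prints the user's registers only; kernel
	 * segment and MSR state would be noise (and was previously
	 * printed from the wrong context). A kernel crash dumps all. */
	__show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
}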
+1 -1
arch/x86/include/asm/pgtable.h
···
  		return xchg(pmdp, pmd);
  	} else {
  		pmd_t old = *pmdp;
- 		*pmdp = pmd;
+ 		WRITE_ONCE(*pmdp, pmd);
  		return old;
  	}
  }
+11 -11
arch/x86/include/asm/pgtable_64.h
···
  void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
  void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

+ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ {
+ 	WRITE_ONCE(*ptep, pte);
+ }
+
  static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
  				    pte_t *ptep)
  {
- 	*ptep = native_make_pte(0);
- }
-
- static inline void native_set_pte(pte_t *ptep, pte_t pte)
- {
- 	*ptep = pte;
+ 	native_set_pte(ptep, native_make_pte(0));
  }

  static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
···

  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
  {
- 	*pmdp = pmd;
+ 	WRITE_ONCE(*pmdp, pmd);
  }

  static inline void native_pmd_clear(pmd_t *pmd)
···

  static inline void native_set_pud(pud_t *pudp, pud_t pud)
  {
- 	*pudp = pud;
+ 	WRITE_ONCE(*pudp, pud);
  }

  static inline void native_pud_clear(pud_t *pud)
···
  	pgd_t pgd;

  	if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
- 		*p4dp = p4d;
+ 		WRITE_ONCE(*p4dp, p4d);
  		return;
  	}

  	pgd = native_make_pgd(native_p4d_val(p4d));
  	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
- 	*p4dp = native_make_p4d(native_pgd_val(pgd));
+ 	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
  }

  static inline void native_p4d_clear(p4d_t *p4d)
···

  static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
  {
- 	*pgdp = pti_set_user_pgtbl(pgdp, pgd);
+ 	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
  }

  static inline void native_pgd_clear(pgd_t *pgd)
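Both page-table diffs are the write-tearing fix from the pull message: a plain *ptep = pte is an ordinary C assignment, which the compiler may split into several narrower stores or emit more than once, letting a concurrent hardware page walk or lockless reader observe a half-written entry. WRITE_ONCE() forces exactly one full-width volatile store. A simplified model of the macro for aligned word-sized objects (the real kernel definition also special-cases 1/2/4/8-byte sizes):

/* Simplified: the volatile qualifier obliges the compiler to perform
 * one store of the complete value, at this point, and nothing else. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

/* The matching read side, giving lockless readers the same guarantee: */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))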
+1 -1
arch/x86/kernel/apic/vector.c
···
  	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
  		/* Something in the core code broke! Survive gracefully */
  		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
- 		return EINVAL;
+ 		return -EINVAL;
  	}

  	ret = assign_managed_vector(irqd, vector_searchmask);
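A one-character fix with the consequence the pull message warns about: the kernel convention is 0 on success and a negative errno on failure, so a bare positive EINVAL sails straight through the caller's error check. A standalone illustration (function names hypothetical):

#include <errno.h>

/* A failure path that forgets the minus sign: */
static int broken_setup(void)
{
	return EINVAL;			/* 22: positive! */
}

static int caller(void)
{
	int ret = broken_setup();

	if (ret < 0)			/* the standard kernel check... */
		return ret;		/* ...is never taken for +EINVAL */

	/* Failure is treated as success here - the "further wreckage". */
	return 0;
}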
+16 -8
arch/x86/kernel/cpu/microcode/amd.c
···
  	struct microcode_amd *mc_amd;
  	struct ucode_cpu_info *uci;
  	struct ucode_patch *p;
+ 	enum ucode_state ret;
  	u32 rev, dummy;

  	BUG_ON(raw_smp_processor_id() != cpu);
···

  	/* need to apply patch? */
  	if (rev >= mc_amd->hdr.patch_id) {
- 		c->microcode = rev;
- 		uci->cpu_sig.rev = rev;
- 		return UCODE_OK;
+ 		ret = UCODE_OK;
+ 		goto out;
  	}

  	if (__apply_microcode_amd(mc_amd)) {
···
  			cpu, mc_amd->hdr.patch_id);
  		return UCODE_ERROR;
  	}
- 	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- 		mc_amd->hdr.patch_id);

- 	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- 	c->microcode = mc_amd->hdr.patch_id;
+ 	rev = mc_amd->hdr.patch_id;
+ 	ret = UCODE_UPDATED;

- 	return UCODE_UPDATED;
+ 	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
+
+ out:
+ 	uci->cpu_sig.rev = rev;
+ 	c->microcode = rev;
+
+ 	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+ 	if (c->cpu_index == boot_cpu_data.cpu_index)
+ 		boot_cpu_data.microcode = rev;
+
+ 	return ret;
  }

  static int install_equiv_cpu_table(const u8 *buf)
+13 -6
arch/x86/kernel/cpu/microcode/intel.c
···
  	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  	struct cpuinfo_x86 *c = &cpu_data(cpu);
  	struct microcode_intel *mc;
+ 	enum ucode_state ret;
  	static int prev_rev;
  	u32 rev;

···
  	 */
  	rev = intel_get_microcode_revision();
  	if (rev >= mc->hdr.rev) {
- 		uci->cpu_sig.rev = rev;
- 		c->microcode = rev;
- 		return UCODE_OK;
+ 		ret = UCODE_OK;
+ 		goto out;
  	}

  	/*
···
  		prev_rev = rev;
  	}

- 	uci->cpu_sig.rev = rev;
- 	c->microcode = rev;
+ 	ret = UCODE_UPDATED;

- 	return UCODE_UPDATED;
+ out:
+ 	uci->cpu_sig.rev = rev;
+ 	c->microcode = rev;
+
+ 	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+ 	if (c->cpu_index == boot_cpu_data.cpu_index)
+ 		boot_cpu_data.microcode = rev;
+
+ 	return ret;
  }

  static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
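Both microcode loaders get the same restructuring: the early-return paths collapse into a single out: exit that records the revision everywhere it is reported from, and additionally refreshes boot_cpu_data.microcode when running on the boot CPU, so the BSP can no longer report a stale revision after a late update. The control-flow shape, reduced to a compilable toy (all names here are illustrative, not the kernel's):

#include <stdint.h>

enum ucode_state { UCODE_OK, UCODE_UPDATED, UCODE_ERROR };

#define NR_CPUS		8
#define BOOT_CPU	0

static uint32_t cpu_rev[NR_CPUS];	/* per-CPU bookkeeping */
static uint32_t boot_cpu_rev;		/* the BSP's global copy */

static enum ucode_state apply_microcode(int cpu, uint32_t new_rev)
{
	enum ucode_state ret;
	uint32_t rev = cpu_rev[cpu];	/* revision currently running */

	if (rev >= new_rev) {
		ret = UCODE_OK;		/* already current, but still */
		goto out;		/* run the bookkeeping below */
	}

	rev = new_rev;			/* model a successful update */
	ret = UCODE_UPDATED;
out:
	cpu_rev[cpu] = rev;
	if (cpu == BOOT_CPU)		/* keep the copy that interfaces */
		boot_cpu_rev = rev;	/* like /proc/cpuinfo read in sync */
	return ret;
}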
+3 -8
arch/x86/kernel/dumpstack.c
···
  	 * they can be printed in the right context.
  	 */
  	if (!partial && on_stack(info, regs, sizeof(*regs))) {
- 		__show_regs(regs, 0);
+ 		__show_regs(regs, SHOW_REGS_SHORT);

  	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
  				       IRET_FRAME_SIZE)) {
···
  	oops_exit();

  	/* Executive summary in case the oops scrolled away */
- 	__show_regs(&exec_summary_regs, true);
+ 	__show_regs(&exec_summary_regs, SHOW_REGS_ALL);

  	if (!signr)
  		return;
···

  void show_regs(struct pt_regs *regs)
  {
- 	bool all = true;
-
  	show_regs_print_info(KERN_DEFAULT);

- 	if (IS_ENABLED(CONFIG_X86_32))
- 		all = !user_mode(regs);
-
- 	__show_regs(regs, all);
+ 	__show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);

  	/*
  	 * When in-kernel, we also print out the stack at the time of the fault..
+2 -2
arch/x86/kernel/process_32.c
···
  #include <asm/intel_rdt_sched.h>
  #include <asm/proto.h>

- void __show_regs(struct pt_regs *regs, int all)
+ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
  {
  	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
  	unsigned long d0, d1, d2, d3, d6, d7;
···
  	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
  	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);

- 	if (!all)
+ 	if (mode != SHOW_REGS_ALL)
  		return;

  	cr0 = read_cr0();
+10 -2
arch/x86/kernel/process_64.c
···
  __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

  /* Prints also some state that isn't saved in the pt_regs */
- void __show_regs(struct pt_regs *regs, int all)
+ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
  {
  	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
  	unsigned long d0, d1, d2, d3, d6, d7;
···
  	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
  	       regs->r13, regs->r14, regs->r15);

- 	if (!all)
+ 	if (mode == SHOW_REGS_SHORT)
  		return;
+
+ 	if (mode == SHOW_REGS_USER) {
+ 		rdmsrl(MSR_FS_BASE, fs);
+ 		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+ 		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
+ 		       fs, shadowgs);
+ 		return;
+ 	}

  	asm("movl %%ds,%0" : "=r" (ds));
  	asm("movl %%cs,%0" : "=r" (cs));
+1 -1
arch/x86/kernel/tsc.c
···

  static unsigned long __init get_loops_per_jiffy(void)
  {
- 	unsigned long lpj = tsc_khz * KHZ;
+ 	u64 lpj = (u64)tsc_khz * KHZ;

  	do_div(lpj, HZ);
  	return lpj;
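This is the truncation fix listed first in the pull message: on 32-bit, tsc_khz and the KHZ constant are both 32-bit, so the product is computed in 32 bits and wraps for TSC frequencies above ~4.29 GHz, before the assignment can widen anything. Casting one operand to u64 promotes the whole multiplication. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tsc_khz = 4500000;	/* hypothetical 4.5 GHz TSC */
	uint32_t khz = 1000;		/* stands in for the KHZ macro */

	uint32_t wrong = tsc_khz * khz;		  /* 32-bit multiply wraps */
	uint64_t right = (uint64_t)tsc_khz * khz; /* widened first: exact */

	printf("truncated: %u\n", wrong);	/* prints 205032704 */
	printf("correct:   %llu\n", (unsigned long long)right);
	return 0;
}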
+4 -4
arch/x86/mm/pgtable.c
···
  	if (pgd_val(pgd) != 0) {
  		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

- 		*pgdp = native_make_pgd(0);
+ 		pgd_clear(pgdp);

  		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
  		pmd_free(mm, pmd);
···
  	int changed = !pte_same(*ptep, entry);

  	if (changed && dirty)
- 		*ptep = entry;
+ 		set_pte(ptep, entry);

  	return changed;
  }
···
  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

  	if (changed && dirty) {
- 		*pmdp = entry;
+ 		set_pmd(pmdp, entry);
  		/*
  		 * We had a write-protection fault here and changed the pmd
  		 * to to more permissive. No need to flush the TLB for that,
···
  	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

  	if (changed && dirty) {
- 		*pudp = entry;
+ 		set_pud(pudp, entry);
  		/*
  		 * We had a write-protection fault here and changed the pud
  		 * to to more permissive. No need to flush the TLB for that,
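These mm/pgtable.c hunks close the loop on the tearing fix: the remaining raw page-table assignments are routed through set_pte()/set_pmd()/set_pud()/pgd_clear(), so every write path ends up in the helpers that now use WRITE_ONCE(). A simplified view of the layering on a non-paravirt build (under CONFIG_PARAVIRT, set_pte() dispatches through pv_mmu_ops instead; the typedef is a toy stand-in):

typedef struct { unsigned long pte; } pte_t;	/* toy stand-in */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

/* The generic accessor is an alias for the native one, and the native
 * one is the single untearable store from the pgtable_64.h diff above. */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

#define set_pte(ptep, pte)	native_set_pte(ptep, pte)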