Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 page table isolation fixes from Thomas Gleixner:
"Four patches addressing the PTI fallout as discussed and debugged
yesterday:

- Remove stale and pointless TLB flush invocations from the hotplug
code

- Remove stale preempt_disable/enable from __native_flush_tlb()

- Plug the memory leak in the write_ldt() error path"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ldt: Make LDT pgtable free conditional
x86/ldt: Plug memory leak in error path
x86/mm: Remove preempt_disable/enable() from __native_flush_tlb()
x86/smpboot: Remove stale TLB flush invocations

+16 -16
+8 -6
arch/x86/include/asm/tlbflush.h
@@ -348,15 +348,17 @@
  */
 static inline void __native_flush_tlb(void)
 {
-	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 	/*
-	 * If current->mm == NULL then we borrow a mm which may change
-	 * during a task switch and therefore we must not be preempted
-	 * while we write CR3 back:
+	 * Preemption or interrupts must be disabled to protect the access
+	 * to the per CPU variable and to prevent being preempted between
+	 * read_cr3() and write_cr3().
 	 */
-	preempt_disable();
+	WARN_ON_ONCE(preemptible());
+
+	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
 	native_write_cr3(__native_read_cr3());
-	preempt_enable();
 }
 
 /*
+8 -1
arch/x86/kernel/ldt.c
@@ -421,7 +421,14 @@
  */
 error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
 if (error) {
-	free_ldt_struct(old_ldt);
+	/*
+	 * This only can fail for the first LDT setup. If an LDT is
+	 * already installed then the PTE page is already
+	 * populated. Mop up a half populated page table.
+	 */
+	if (!WARN_ON_ONCE(old_ldt))
+		free_ldt_pgtables(mm);
+	free_ldt_struct(new_ldt);
 	goto out_unlock;
 }
 
-9
arch/x86/kernel/smpboot.c
@@ -126,24 +126,15 @@
 	spin_lock_irqsave(&rtc_lock, flags);
 	CMOS_WRITE(0xa, 0xf);
 	spin_unlock_irqrestore(&rtc_lock, flags);
-	local_flush_tlb();
-	pr_debug("1.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 		start_eip >> 4;
-	pr_debug("2.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 		start_eip & 0xf;
-	pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
 {
 	unsigned long flags;
-
-	/*
-	 * Install writable page 0 entry to set BIOS data area.
-	 */
-	local_flush_tlb();
 
 	/*
 	 * Paranoid: Set warm reset code and vector here back