Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fix from H. Peter Anvin:
"A single fix to not invoke the espfix code on Xen PV, as it turns out
to oops the guest when invoked after all. This patch leaves some
amount of dead code, in particular unnecessary initialization of the
espfix stacks when they won't be used, but in the interest of keeping
the patch minimal that cleanup can wait for the next cycle"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86_64/entry/xen: Do not invoke espfix64 on Xen

+11 -21
+1 -1
arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
 
-#define INTERRUPT_RETURN	iretq
+#define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64				\
 	swapgs;					\
 	sysretq;
+10 -18
arch/x86/kernel/entry_64.S
@@ -830,27 +830,24 @@
 	RESTORE_ARGS 1,8,1
 
 irq_return:
+	INTERRUPT_RETURN
+
+ENTRY(native_iret)
 	/*
 	 * Are we returning to a stack segment from the LDT?  Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
 #ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
-	jnz irq_return_ldt
+	jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-	INTERRUPT_RETURN
-	_ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
 	iretq
-	_ASM_EXTABLE(native_iret, bad_iret)
-#endif
+	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
 	SWAPGS
@@ -872,7 +869,7 @@
 	SWAPGS
 	movq %rax,%rsp
 	popq_cfi %rax
-	jmp irq_return_iret
+	jmp native_irq_return_iret
 #endif
 
 .section .fixup,"ax"
@@ -956,13 +953,8 @@
 	cmpl $__KERNEL_CS,CS(%rdi)
 	jne do_double_fault
 	movq RIP(%rdi),%rax
-	cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-	je 1f
-	cmpq $native_iret,%rax
-#endif
+	cmpq $native_irq_return_iret,%rax
 	jne do_double_fault		/* This shouldn't happen... */
-1:
 	movq PER_CPU_VAR(kernel_stack),%rax
 	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
 	movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@
  */
 error_kernelspace:
 	incl %ebx
-	leaq irq_return_iret(%rip),%rcx
+	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	je error_swapgs
 	movl %ecx,%eax			/* zero extend */
-2
arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, iret);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);