Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_entry_for_7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 entry code updates from Dave Hansen:
"This is entirely composed of a set of long overdue VDSO cleanups. They
make the VDSO build much more logical and zap quite a bit of old
cruft.

It also results in a coveted net-code-removal diffstat"

* tag 'x86_entry_for_7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/entry/vdso: Add vdso2c to .gitignore
x86/entry/vdso32: Omit '.cfi_offset eflags' for LLVM < 16
MAINTAINERS: Adjust vdso file entry in INTEL SGX
x86/entry/vdso/selftest: Update location of vgetrandom-chacha.S
x86/entry/vdso: Fix filtering of vdso compiler flags
x86/entry/vdso: Update the object paths for "make vdso_install"
x86/entry/vdso32: When using int $0x80, use it directly
x86/cpufeature: Replace X86_FEATURE_SYSENTER32 with X86_FEATURE_SYSFAST32
x86/vdso: Abstract out vdso system call internals
x86/entry/vdso: Include GNU_PROPERTY and GNU_STACK PHDRs
x86/entry/vdso32: Remove open-coded DWARF in sigreturn.S
x86/entry/vdso32: Remove SYSCALL_ENTER_KERNEL macro in sigreturn.S
x86/entry/vdso32: Don't rely on int80_landing_pad for adjusting ip
x86/entry/vdso: Refactor the vdso build
x86/entry/vdso: Move vdso2c to arch/x86/tools
x86/entry/vdso: Rename vdso_image_* to vdso*_image

+472 -507
+1 -1
MAINTAINERS
··· 13032 13032 Q: https://patchwork.kernel.org/project/intel-sgx/list/ 13033 13033 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/sgx 13034 13034 F: Documentation/arch/x86/sgx.rst 13035 - F: arch/x86/entry/vdso/vsgx.S 13035 + F: arch/x86/entry/vdso/vdso64/vsgx.S 13036 13036 F: arch/x86/include/asm/sgx.h 13037 13037 F: arch/x86/include/uapi/asm/sgx.h 13038 13038 F: arch/x86/kernel/cpu/sgx/*
+8
arch/x86/Kconfig.cpufeatures
··· 56 56 def_bool y 57 57 depends on MATOM 58 58 59 + config X86_REQUIRED_FEATURE_SYSFAST32 60 + def_bool y 61 + depends on X86_64 && !X86_FRED 62 + 59 63 config X86_REQUIRED_FEATURE_CPUID 60 64 def_bool y 61 65 depends on X86_64 ··· 123 119 config X86_DISABLED_FEATURE_CENTAUR_MCR 124 120 def_bool y 125 121 depends on X86_64 122 + 123 + config X86_DISABLED_FEATURE_SYSCALL32 124 + def_bool y 125 + depends on !X86_64 126 126 127 127 config X86_DISABLED_FEATURE_PCID 128 128 def_bool y
+4 -4
arch/x86/Makefile
··· 252 252 253 253 254 254 archscripts: scripts_basic 255 - $(Q)$(MAKE) $(build)=arch/x86/tools relocs 255 + $(Q)$(MAKE) $(build)=arch/x86/tools relocs vdso2c 256 256 257 257 ### 258 258 # Syscall table generation ··· 318 318 install: 319 319 $(call cmd,install) 320 320 321 - vdso-install-$(CONFIG_X86_64) += arch/x86/entry/vdso/vdso64.so.dbg 322 - vdso-install-$(CONFIG_X86_X32_ABI) += arch/x86/entry/vdso/vdsox32.so.dbg 323 - vdso-install-$(CONFIG_COMPAT_32) += arch/x86/entry/vdso/vdso32.so.dbg 321 + vdso-install-$(CONFIG_X86_64) += arch/x86/entry/vdso/vdso64/vdso64.so.dbg 322 + vdso-install-$(CONFIG_X86_X32_ABI) += arch/x86/entry/vdso/vdso64/vdsox32.so.dbg 323 + vdso-install-$(CONFIG_COMPAT_32) += arch/x86/entry/vdso/vdso32/vdso32.so.dbg 324 324 325 325 archprepare: checkbin 326 326 checkbin:
+1 -1
arch/x86/entry/syscall_32.c
··· 319 319 * convention. Adjust regs so it looks like we entered using int80. 320 320 */ 321 321 unsigned long landing_pad = (unsigned long)current->mm->context.vdso + 322 - vdso_image_32.sym_int80_landing_pad; 322 + vdso32_image.sym_int80_landing_pad; 323 323 324 324 /* 325 325 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
+4 -7
arch/x86/entry/vdso/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - vdso.lds 3 - vdsox32.lds 4 - vdso32-syscall-syms.lds 5 - vdso32-sysenter-syms.lds 6 - vdso32-int80-syms.lds 7 - vdso-image-*.c 8 - vdso2c 2 + *.lds 3 + *.so 4 + *.so.dbg 5 + vdso*-image.c
+6 -156
arch/x86/entry/vdso/Makefile
··· 3 3 # Building vDSO images for x86. 4 4 # 5 5 6 - # Include the generic Makefile to check the built vDSO: 7 - include $(srctree)/lib/vdso/Makefile.include 6 + # Regular kernel objects 7 + obj-y := vma.o extable.o 8 + obj-$(CONFIG_COMPAT_32) += vdso32-setup.o 8 9 9 - # Files to link into the vDSO: 10 - vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vgetrandom.o vgetrandom-chacha.o 11 - vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o 12 - vobjs32-y += vdso32/vclock_gettime.o vdso32/vgetcpu.o 13 - vobjs-$(CONFIG_X86_SGX) += vsgx.o 14 - 15 - # Files to link into the kernel: 16 - obj-y += vma.o extable.o 17 - 18 - # vDSO images to build: 19 - obj-$(CONFIG_X86_64) += vdso-image-64.o 20 - obj-$(CONFIG_X86_X32_ABI) += vdso-image-x32.o 21 - obj-$(CONFIG_COMPAT_32) += vdso-image-32.o vdso32-setup.o 22 - 23 - vobjs := $(addprefix $(obj)/, $(vobjs-y)) 24 - vobjs32 := $(addprefix $(obj)/, $(vobjs32-y)) 25 - 26 - $(obj)/vdso.o: $(obj)/vdso.so 27 - 28 - targets += vdso.lds $(vobjs-y) 29 - targets += vdso32/vdso32.lds $(vobjs32-y) 30 - 31 - targets += $(foreach x, 64 x32 32, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg) 32 - 33 - CPPFLAGS_vdso.lds += -P -C 34 - 35 - VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 \ 36 - -z max-page-size=4096 37 - 38 - $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE 39 - $(call if_changed,vdso_and_check) 40 - 41 - HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi 42 - hostprogs += vdso2c 43 - 44 - quiet_cmd_vdso2c = VDSO2C $@ 45 - cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@ 46 - 47 - $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE 48 - $(call if_changed,vdso2c) 49 - 50 - # 51 - # Don't omit frame pointers for ease of userspace debugging, but do 52 - # optimize sibling calls. 
53 - # 54 - CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ 55 - $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \ 56 - -fno-omit-frame-pointer -foptimize-sibling-calls \ 57 - -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO 58 - 59 - ifdef CONFIG_MITIGATION_RETPOLINE 60 - ifneq ($(RETPOLINE_VDSO_CFLAGS),) 61 - CFL += $(RETPOLINE_VDSO_CFLAGS) 62 - endif 63 - endif 64 - 65 - $(vobjs): KBUILD_CFLAGS := $(filter-out $(PADDING_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(KSTACK_ERASE_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) 66 - $(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO 67 - 68 - # 69 - # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 70 - # 71 - CFLAGS_REMOVE_vclock_gettime.o = -pg 72 - CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg 73 - CFLAGS_REMOVE_vgetcpu.o = -pg 74 - CFLAGS_REMOVE_vdso32/vgetcpu.o = -pg 75 - CFLAGS_REMOVE_vsgx.o = -pg 76 - CFLAGS_REMOVE_vgetrandom.o = -pg 77 - 78 - # 79 - # X32 processes use x32 vDSO to access 64bit kernel data. 80 - # 81 - # Build x32 vDSO image: 82 - # 1. Compile x32 vDSO as 64bit. 83 - # 2. Convert object files to x32. 84 - # 3. Build x32 VDSO image with x32 objects, which contains 64bit codes 85 - # so that it can reach 64bit address space with 64bit pointers. 86 - # 87 - 88 - CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) 89 - VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \ 90 - -z max-page-size=4096 91 - 92 - # x32-rebranded versions 93 - vobjx32s-y := $(vobjs-y:.o=-x32.o) 94 - 95 - # same thing, but in the output directory 96 - vobjx32s := $(addprefix $(obj)/, $(vobjx32s-y)) 97 - 98 - # Convert 64bit object file to x32 for x32 vDSO. 
99 - quiet_cmd_x32 = X32 $@ 100 - cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@ 101 - 102 - $(obj)/%-x32.o: $(obj)/%.o FORCE 103 - $(call if_changed,x32) 104 - 105 - targets += vdsox32.lds $(vobjx32s-y) 106 - 107 - $(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table 108 - $(obj)/%.so: $(obj)/%.so.dbg FORCE 109 - $(call if_changed,objcopy) 110 - 111 - $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE 112 - $(call if_changed,vdso_and_check) 113 - 114 - CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds) 115 - VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1 116 - 117 - KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO 118 - $(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) 119 - $(obj)/vdso32.so.dbg: asflags-$(CONFIG_X86_64) += -m32 120 - 121 - KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) 122 - KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32)) 123 - KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) 124 - KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) 125 - KBUILD_CFLAGS_32 := $(filter-out $(RANDSTRUCT_CFLAGS),$(KBUILD_CFLAGS_32)) 126 - KBUILD_CFLAGS_32 := $(filter-out $(KSTACK_ERASE_CFLAGS),$(KBUILD_CFLAGS_32)) 127 - KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) 128 - KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) 129 - KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32)) 130 - KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_CFI),$(KBUILD_CFLAGS_32)) 131 - KBUILD_CFLAGS_32 := $(filter-out $(PADDING_CFLAGS),$(KBUILD_CFLAGS_32)) 132 - KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic 133 - KBUILD_CFLAGS_32 += -fno-stack-protector 134 - KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) 135 - KBUILD_CFLAGS_32 += -fno-omit-frame-pointer 136 - KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING 137 - KBUILD_CFLAGS_32 += -DBUILD_VDSO 138 - 139 - ifdef CONFIG_MITIGATION_RETPOLINE 
140 - ifneq ($(RETPOLINE_VDSO_CFLAGS),) 141 - KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS) 142 - endif 143 - endif 144 - 145 - $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 146 - 147 - $(obj)/vdso32.so.dbg: $(obj)/vdso32/vdso32.lds $(vobjs32) FORCE 148 - $(call if_changed,vdso_and_check) 149 - 150 - # 151 - # The DSO images are built using a special linker script. 152 - # 153 - quiet_cmd_vdso = VDSO $@ 154 - cmd_vdso = $(LD) -o $@ \ 155 - $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ 156 - -T $(filter %.lds,$^) $(filter %.o,$^) 157 - 158 - VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 --no-undefined \ 159 - $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack 160 - 161 - quiet_cmd_vdso_and_check = VDSO $@ 162 - cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check) 10 + # vDSO directories 11 + obj-$(CONFIG_X86_64) += vdso64/ 12 + obj-$(CONFIG_COMPAT_32) += vdso32/
+89
arch/x86/entry/vdso/common/Makefile.include
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Building vDSO images for x86. 4 + # 5 + 6 + # Include the generic Makefile to check the built vDSO: 7 + include $(srctree)/lib/vdso/Makefile.include 8 + 9 + obj-y += $(foreach x,$(vdsos-y),vdso$(x)-image.o) 10 + 11 + targets += $(foreach x,$(vdsos-y),vdso$(x)-image.c vdso$(x).so vdso$(x).so.dbg vdso$(x).lds) 12 + targets += $(vobjs-y) 13 + 14 + # vobjs-y with $(obj)/ prepended 15 + vobjs := $(addprefix $(obj)/,$(vobjs-y)) 16 + 17 + # Options for vdso*.lds 18 + CPPFLAGS_VDSO_LDS := -P -C -I$(src)/.. 19 + $(obj)/%.lds : KBUILD_CPPFLAGS += $(CPPFLAGS_VDSO_LDS) 20 + 21 + # 22 + # Options from KBUILD_[AC]FLAGS that should *NOT* be kept 23 + # 24 + flags-remove-y += \ 25 + -D__KERNEL__ -mcmodel=kernel -mregparm=3 \ 26 + -fno-pic -fno-PIC -fno-pie -fno-PIE \ 27 + -mfentry -pg \ 28 + $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(KSTACK_ERASE_CFLAGS) \ 29 + $(RETPOLINE_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \ 30 + $(PADDING_CFLAGS) 31 + 32 + # 33 + # Don't omit frame pointers for ease of userspace debugging, but do 34 + # optimize sibling calls. 35 + # 36 + flags-y += -D__DISABLE_EXPORTS 37 + flags-y += -DDISABLE_BRANCH_PROFILING 38 + flags-y += -DBUILD_VDSO 39 + flags-y += -I$(src)/.. -I$(srctree) 40 + flags-y += -O2 -fpic 41 + flags-y += -fno-stack-protector 42 + flags-y += -fno-omit-frame-pointer 43 + flags-y += -foptimize-sibling-calls 44 + flags-y += -fasynchronous-unwind-tables 45 + 46 + # Reset cf protections enabled by compiler default 47 + flags-y += $(call cc-option, -fcf-protection=none) 48 + flags-$(X86_USER_SHADOW_STACK) += $(call cc-option, -fcf-protection=return) 49 + # When user space IBT is supported, enable this. 
50 + # flags-$(CONFIG_USER_IBT) += $(call cc-option, -fcf-protection=branch) 51 + 52 + flags-$(CONFIG_MITIGATION_RETPOLINE) += $(RETPOLINE_VDSO_CFLAGS) 53 + 54 + # These need to be conditional on $(vobjs) as they do not apply to 55 + # the output vdso*-image.o files which are standard kernel objects. 56 + $(vobjs) : KBUILD_AFLAGS := \ 57 + $(filter-out $(flags-remove-y),$(KBUILD_AFLAGS)) $(flags-y) 58 + $(vobjs) : KBUILD_CFLAGS := \ 59 + $(filter-out $(flags-remove-y),$(KBUILD_CFLAGS)) $(flags-y) 60 + 61 + # 62 + # The VDSO images are built using a special linker script. 63 + # 64 + VDSO_LDFLAGS := -shared --hash-style=both --build-id=sha1 --no-undefined \ 65 + $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack 66 + 67 + quiet_cmd_vdso = VDSO $@ 68 + cmd_vdso = $(LD) -o $@ \ 69 + $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$*) \ 70 + -T $(filter %.lds,$^) $(filter %.o,$^) 71 + quiet_cmd_vdso_and_check = VDSO $@ 72 + cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check) 73 + 74 + $(obj)/vdso%.so.dbg: $(obj)/vdso%.lds FORCE 75 + $(call if_changed,vdso_and_check) 76 + 77 + $(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table 78 + $(obj)/%.so: $(obj)/%.so.dbg FORCE 79 + $(call if_changed,objcopy) 80 + 81 + VDSO2C = $(objtree)/arch/x86/tools/vdso2c 82 + 83 + quiet_cmd_vdso2c = VDSO2C $@ 84 + cmd_vdso2c = $(VDSO2C) $< $(<:%.dbg=%) $@ 85 + 86 + $(obj)/%-image.c: $(obj)/%.so.dbg $(obj)/%.so $(VDSO2C) FORCE 87 + $(call if_changed,vdso2c) 88 + 89 + $(obj)/%-image.o: $(obj)/%-image.c
arch/x86/entry/vdso/vclock_gettime.c arch/x86/entry/vdso/common/vclock_gettime.c
+23 -15
arch/x86/entry/vdso/vdso-layout.lds.S arch/x86/entry/vdso/common/vdso-layout.lds.S
··· 47 47 *(.gnu.linkonce.b.*) 48 48 } :text 49 49 50 - /* 51 - * Discard .note.gnu.property sections which are unused and have 52 - * different alignment requirement from vDSO note sections. 53 - */ 54 - /DISCARD/ : { 50 + .note.gnu.property : { 55 51 *(.note.gnu.property) 56 - } 57 - .note : { *(.note.*) } :text :note 52 + } :text :note :gnu_property 53 + .note : { 54 + *(.note*) 55 + } :text :note 58 56 59 - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 60 - .eh_frame : { KEEP (*(.eh_frame)) } :text 61 - 57 + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 58 + .eh_frame : { 59 + KEEP (*(.eh_frame)) 60 + *(.eh_frame.*) 61 + } :text 62 62 63 63 /* 64 64 * Text is well-separated from actual data: there's plenty of ··· 87 87 * Very old versions of ld do not recognize this name token; use the constant. 88 88 */ 89 89 #define PT_GNU_EH_FRAME 0x6474e550 90 + #define PT_GNU_STACK 0x6474e551 91 + #define PT_GNU_PROPERTY 0x6474e553 90 92 91 93 /* 92 94 * We must supply the ELF program headers explicitly to get just one 93 95 * PT_LOAD segment, and set the flags explicitly to make segments read-only. 94 - */ 96 + */ 97 + #define PF_R FLAGS(4) 98 + #define PF_RW FLAGS(6) 99 + #define PF_RX FLAGS(5) 100 + 95 101 PHDRS 96 102 { 97 - text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ 98 - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ 99 - note PT_NOTE FLAGS(4); /* PF_R */ 100 - eh_frame_hdr PT_GNU_EH_FRAME; 103 + text PT_LOAD PF_RX FILEHDR PHDRS; 104 + dynamic PT_DYNAMIC PF_R; 105 + note PT_NOTE PF_R; 106 + eh_frame_hdr PT_GNU_EH_FRAME PF_R; 107 + gnu_stack PT_GNU_STACK PF_RW; 108 + gnu_property PT_GNU_PROPERTY PF_R; 101 109 }
+4 -1
arch/x86/entry/vdso/vdso-note.S arch/x86/entry/vdso/common/note.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 /* 2 3 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 3 4 * Here we can supply some information useful to userland. 4 5 */ 5 6 6 7 #include <linux/build-salt.h> 7 - #include <linux/uts.h> 8 8 #include <linux/version.h> 9 9 #include <linux/elfnote.h> 10 10 11 + /* Ideally this would use UTS_NAME, but using a quoted string here 12 + doesn't work. Remember to change this when changing the 13 + kernel's name. */ 11 14 ELFNOTE_START(Linux, 0, "a") 12 15 .long LINUX_VERSION_CODE 13 16 ELFNOTE_END
+3 -13
arch/x86/entry/vdso/vdso.lds.S arch/x86/entry/vdso/vdso64/vdsox32.lds.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * Linker script for 64-bit vDSO. 3 + * Linker script for x32 vDSO. 4 4 * We #include the file to define the layout details. 5 5 * 6 6 * This file defines the version script giving the user-exported symbols in 7 7 * the DSO. 8 8 */ 9 9 10 - #define BUILD_VDSO64 10 + #define BUILD_VDSOX32 11 11 12 - #include "vdso-layout.lds.S" 12 + #include "common/vdso-layout.lds.S" 13 13 14 14 /* 15 15 * This controls what userland symbols we export from the vDSO. ··· 17 17 VERSION { 18 18 LINUX_2.6 { 19 19 global: 20 - clock_gettime; 21 20 __vdso_clock_gettime; 22 - gettimeofday; 23 21 __vdso_gettimeofday; 24 - getcpu; 25 22 __vdso_getcpu; 26 - time; 27 23 __vdso_time; 28 - clock_getres; 29 24 __vdso_clock_getres; 30 - #ifdef CONFIG_X86_SGX 31 - __vdso_sgx_enter_enclave; 32 - #endif 33 - getrandom; 34 - __vdso_getrandom; 35 25 local: *; 36 26 }; 37 27 }
arch/x86/entry/vdso/vdso2c.c arch/x86/tools/vdso2c.c
arch/x86/entry/vdso/vdso2c.h arch/x86/tools/vdso2c.h
+24
arch/x86/entry/vdso/vdso32/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # 32-bit vDSO images for x86. 4 + # 5 + 6 + # The vDSOs built in this directory 7 + vdsos-y := 32 8 + 9 + # Files to link into the vDSO: 10 + vobjs-y := note.o vclock_gettime.o vgetcpu.o 11 + vobjs-y += system_call.o sigreturn.o 12 + 13 + # Compilation flags 14 + flags-y := -DBUILD_VDSO32 -m32 -mregparm=0 15 + flags-$(CONFIG_X86_64) += -include $(src)/fake_32bit_build.h 16 + flags-remove-y := -m64 17 + 18 + # The location of this include matters! 19 + include $(src)/../common/Makefile.include 20 + 21 + # Linker options for the vdso 22 + VDSO_LDFLAGS_32 := -m elf_i386 -soname linux-gate.so.1 23 + 24 + $(obj)/vdso32.so.dbg: $(vobjs)
+1 -18
arch/x86/entry/vdso/vdso32/note.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 4 - * Here we can supply some information useful to userland. 5 - */ 6 - 7 - #include <linux/build-salt.h> 8 - #include <linux/version.h> 9 - #include <linux/elfnote.h> 10 - 11 - /* Ideally this would use UTS_NAME, but using a quoted string here 12 - doesn't work. Remember to change this when changing the 13 - kernel's name. */ 14 - ELFNOTE_START(Linux, 0, "a") 15 - .long LINUX_VERSION_CODE 16 - ELFNOTE_END 17 - 18 - BUILD_SALT 1 + #include "common/note.S"
+42 -118
arch/x86/entry/vdso/vdso32/sigreturn.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #include <linux/linkage.h> 3 3 #include <asm/unistd_32.h> 4 + #include <asm/dwarf2.h> 4 5 #include <asm/asm-offsets.h> 5 6 6 - #ifndef SYSCALL_ENTER_KERNEL 7 - #define SYSCALL_ENTER_KERNEL int $0x80 7 + .macro STARTPROC_SIGNAL_FRAME sc 8 + CFI_STARTPROC simple 9 + CFI_SIGNAL_FRAME 10 + /* -4 as pretcode has already been popped */ 11 + CFI_DEF_CFA esp, \sc - 4 12 + CFI_OFFSET eip, IA32_SIGCONTEXT_ip 13 + CFI_OFFSET eax, IA32_SIGCONTEXT_ax 14 + CFI_OFFSET ebx, IA32_SIGCONTEXT_bx 15 + CFI_OFFSET ecx, IA32_SIGCONTEXT_cx 16 + CFI_OFFSET edx, IA32_SIGCONTEXT_dx 17 + CFI_OFFSET esp, IA32_SIGCONTEXT_sp 18 + CFI_OFFSET ebp, IA32_SIGCONTEXT_bp 19 + CFI_OFFSET esi, IA32_SIGCONTEXT_si 20 + CFI_OFFSET edi, IA32_SIGCONTEXT_di 21 + CFI_OFFSET es, IA32_SIGCONTEXT_es 22 + CFI_OFFSET cs, IA32_SIGCONTEXT_cs 23 + CFI_OFFSET ss, IA32_SIGCONTEXT_ss 24 + CFI_OFFSET ds, IA32_SIGCONTEXT_ds 25 + /* 26 + * .cfi_offset eflags requires LLVM 16 or newer: 27 + * 28 + * https://github.com/llvm/llvm-project/commit/67bd3c58c0c7389e39c5a2f4d3b1a30459ccf5b7 29 + * 30 + * Check for 16.0.1 to ensure the support is present, as 16.0.0 may be a 31 + * prerelease version. 32 + */ 33 + #if defined(CONFIG_AS_IS_GNU) || (defined(CONFIG_AS_IS_LLVM) && CONFIG_AS_VERSION >= 160001) 34 + CFI_OFFSET eflags, IA32_SIGCONTEXT_flags 8 35 #endif 36 + .endm 9 37 10 38 .text 11 39 .globl __kernel_sigreturn 12 40 .type __kernel_sigreturn,@function 13 - nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ 14 41 ALIGN 15 42 __kernel_sigreturn: 16 - .LSTART_sigreturn: 17 - popl %eax /* XXX does this mean it needs unwind info? 
*/ 43 + STARTPROC_SIGNAL_FRAME IA32_SIGFRAME_sigcontext 44 + popl %eax 45 + CFI_ADJUST_CFA_OFFSET -4 18 46 movl $__NR_sigreturn, %eax 19 - SYSCALL_ENTER_KERNEL 20 - .LEND_sigreturn: 47 + int $0x80 21 48 SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) 22 - nop 23 - .size __kernel_sigreturn,.-.LSTART_sigreturn 49 + ud2a 50 + CFI_ENDPROC 51 + .size __kernel_sigreturn,.-__kernel_sigreturn 24 52 25 53 .globl __kernel_rt_sigreturn 26 54 .type __kernel_rt_sigreturn,@function 27 55 ALIGN 28 56 __kernel_rt_sigreturn: 29 - .LSTART_rt_sigreturn: 57 + STARTPROC_SIGNAL_FRAME IA32_RT_SIGFRAME_sigcontext 30 58 movl $__NR_rt_sigreturn, %eax 31 - SYSCALL_ENTER_KERNEL 32 - .LEND_rt_sigreturn: 59 + int $0x80 33 60 SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL) 34 - nop 35 - .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn 36 - .previous 37 - 38 - .section .eh_frame,"a",@progbits 39 - .LSTARTFRAMEDLSI1: 40 - .long .LENDCIEDLSI1-.LSTARTCIEDLSI1 41 - .LSTARTCIEDLSI1: 42 - .long 0 /* CIE ID */ 43 - .byte 1 /* Version number */ 44 - .string "zRS" /* NUL-terminated augmentation string */ 45 - .uleb128 1 /* Code alignment factor */ 46 - .sleb128 -4 /* Data alignment factor */ 47 - .byte 8 /* Return address register column */ 48 - .uleb128 1 /* Augmentation value length */ 49 - .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */ 50 - .byte 0 /* DW_CFA_nop */ 51 - .align 4 52 - .LENDCIEDLSI1: 53 - .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */ 54 - .LSTARTFDEDLSI1: 55 - .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */ 56 - /* HACK: The dwarf2 unwind routines will subtract 1 from the 57 - return address to get an address in the middle of the 58 - presumed call instruction. Since we didn't get here via 59 - a call, we need to include the nop before the real start 60 - to make up for it. */ 61 - .long .LSTART_sigreturn-1-. 
/* PC-relative start address */ 62 - .long .LEND_sigreturn-.LSTART_sigreturn+1 63 - .uleb128 0 /* Augmentation */ 64 - /* What follows are the instructions for the table generation. 65 - We record the locations of each register saved. This is 66 - complicated by the fact that the "CFA" is always assumed to 67 - be the value of the stack pointer in the caller. This means 68 - that we must define the CFA of this body of code to be the 69 - saved value of the stack pointer in the sigcontext. Which 70 - also means that there is no fixed relation to the other 71 - saved registers, which means that we must use DW_CFA_expression 72 - to compute their addresses. It also means that when we 73 - adjust the stack with the popl, we have to do it all over again. */ 74 - 75 - #define do_cfa_expr(offset) \ 76 - .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ 77 - .uleb128 1f-0f; /* length */ \ 78 - 0: .byte 0x74; /* DW_OP_breg4 */ \ 79 - .sleb128 offset; /* offset */ \ 80 - .byte 0x06; /* DW_OP_deref */ \ 81 - 1: 82 - 83 - #define do_expr(regno, offset) \ 84 - .byte 0x10; /* DW_CFA_expression */ \ 85 - .uleb128 regno; /* regno */ \ 86 - .uleb128 1f-0f; /* length */ \ 87 - 0: .byte 0x74; /* DW_OP_breg4 */ \ 88 - .sleb128 offset; /* offset */ \ 89 - 1: 90 - 91 - do_cfa_expr(IA32_SIGCONTEXT_sp+4) 92 - do_expr(0, IA32_SIGCONTEXT_ax+4) 93 - do_expr(1, IA32_SIGCONTEXT_cx+4) 94 - do_expr(2, IA32_SIGCONTEXT_dx+4) 95 - do_expr(3, IA32_SIGCONTEXT_bx+4) 96 - do_expr(5, IA32_SIGCONTEXT_bp+4) 97 - do_expr(6, IA32_SIGCONTEXT_si+4) 98 - do_expr(7, IA32_SIGCONTEXT_di+4) 99 - do_expr(8, IA32_SIGCONTEXT_ip+4) 100 - 101 - .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. 
*/ 102 - 103 - do_cfa_expr(IA32_SIGCONTEXT_sp) 104 - do_expr(0, IA32_SIGCONTEXT_ax) 105 - do_expr(1, IA32_SIGCONTEXT_cx) 106 - do_expr(2, IA32_SIGCONTEXT_dx) 107 - do_expr(3, IA32_SIGCONTEXT_bx) 108 - do_expr(5, IA32_SIGCONTEXT_bp) 109 - do_expr(6, IA32_SIGCONTEXT_si) 110 - do_expr(7, IA32_SIGCONTEXT_di) 111 - do_expr(8, IA32_SIGCONTEXT_ip) 112 - 113 - .align 4 114 - .LENDFDEDLSI1: 115 - 116 - .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */ 117 - .LSTARTFDEDLSI2: 118 - .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */ 119 - /* HACK: See above wrt unwind library assumptions. */ 120 - .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ 121 - .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 122 - .uleb128 0 /* Augmentation */ 123 - /* What follows are the instructions for the table generation. 124 - We record the locations of each register saved. This is 125 - slightly less complicated than the above, since we don't 126 - modify the stack pointer in the process. */ 127 - 128 - do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp) 129 - do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax) 130 - do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx) 131 - do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx) 132 - do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx) 133 - do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp) 134 - do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si) 135 - do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di) 136 - do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip) 137 - 138 - .align 4 139 - .LENDFDEDLSI2: 61 + ud2a 62 + CFI_ENDPROC 63 + .size __kernel_rt_sigreturn,.-__kernel_rt_sigreturn 140 64 .previous
+14 -8
arch/x86/entry/vdso/vdso32/system_call.S
··· 14 14 ALIGN 15 15 __kernel_vsyscall: 16 16 CFI_STARTPROC 17 + 18 + /* 19 + * If using int $0x80, there is no reason to muck about with the 20 + * stack here. Unfortunately just overwriting the push instructions 21 + * would mess up the CFI annotations, but it is only a 3-byte 22 + * NOP in that case. This could be avoided by patching the 23 + * vdso symbol table (not the code) and entry point, but that 24 + * would a fair bit of tooling work or by simply compiling 25 + * two different vDSO images, but that doesn't seem worth it. 26 + */ 27 + ALTERNATIVE "int $0x80; ret", "", X86_FEATURE_SYSFAST32 28 + 17 29 /* 18 30 * Reshuffle regs so that all of any of the entry instructions 19 31 * will preserve enough state. ··· 64 52 #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" 65 53 #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" 66 54 67 - #ifdef CONFIG_X86_64 68 - /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ 69 - ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ 70 - SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 71 - #else 72 - ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP 73 - #endif 55 + ALTERNATIVE SYSENTER_SEQUENCE, SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 74 56 75 - /* Enter using int $0x80 */ 57 + /* Re-enter using int $0x80 */ 76 58 int $0x80 77 59 SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) 78 60
+1 -4
arch/x86/entry/vdso/vdso32/vclock_gettime.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #define BUILD_VDSO32 3 - #include "fake_32bit_build.h" 4 - #include "../vclock_gettime.c" 1 + #include "common/vclock_gettime.c"
+1 -1
arch/x86/entry/vdso/vdso32/vdso32.lds.S
··· 11 11 12 12 #define BUILD_VDSO32 13 13 14 - #include "../vdso-layout.lds.S" 14 + #include "common/vdso-layout.lds.S" 15 15 16 16 /* The ELF entry point can be used to set the AT_SYSINFO value. */ 17 17 ENTRY(__kernel_vsyscall);
+1 -3
arch/x86/entry/vdso/vdso32/vgetcpu.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "fake_32bit_build.h" 3 - #include "../vgetcpu.c" 1 + #include "common/vgetcpu.c"
+46
arch/x86/entry/vdso/vdso64/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # 64-bit vDSO images for x86. 4 + # 5 + 6 + # The vDSOs built in this directory 7 + vdsos-y := 64 8 + vdsos-$(CONFIG_X86_X32_ABI) += x32 9 + 10 + # Files to link into the vDSO: 11 + vobjs-y := note.o vclock_gettime.o vgetcpu.o 12 + vobjs-y += vgetrandom.o vgetrandom-chacha.o 13 + vobjs-$(CONFIG_X86_SGX) += vsgx.o 14 + 15 + # Compilation flags 16 + flags-y := -DBUILD_VDSO64 -m64 -mcmodel=small 17 + 18 + # The location of this include matters! 19 + include $(src)/../common/Makefile.include 20 + 21 + # 22 + # X32 processes use x32 vDSO to access 64bit kernel data. 23 + # 24 + # Build x32 vDSO image: 25 + # 1. Compile x32 vDSO as 64bit. 26 + # 2. Convert object files to x32. 27 + # 3. Build x32 VDSO image with x32 objects, which contains 64bit codes 28 + # so that it can reach 64bit address space with 64bit pointers. 29 + # 30 + 31 + # Convert 64bit object file to x32 for x32 vDSO. 32 + quiet_cmd_x32 = X32 $@ 33 + cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@ 34 + 35 + $(obj)/%-x32.o: $(obj)/%.o FORCE 36 + $(call if_changed,x32) 37 + 38 + vobjsx32 = $(patsubst %.o,%-x32.o,$(vobjs)) 39 + targets += $(patsubst %.o,%-x32.o,$(vobjs-y)) 40 + 41 + # Linker options for the vdso 42 + VDSO_LDFLAGS_64 := -m elf_x86_64 -soname linux-vdso.so.1 -z max-page-size=4096 43 + VDSO_LDFLAGS_x32 := $(subst elf_x86_64,elf32_x86_64,$(VDSO_LDFLAGS_64)) 44 + 45 + $(obj)/vdso64.so.dbg: $(vobjs) 46 + $(obj)/vdsox32.so.dbg: $(vobjsx32)
+1
arch/x86/entry/vdso/vdso64/note.S
··· 1 + #include "common/note.S"
+1
arch/x86/entry/vdso/vdso64/vclock_gettime.c
··· 1 + #include "common/vclock_gettime.c"
+1
arch/x86/entry/vdso/vdso64/vgetcpu.c
··· 1 + #include "common/vgetcpu.c"
+13 -3
arch/x86/entry/vdso/vdsox32.lds.S arch/x86/entry/vdso/vdso64/vdso64.lds.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * Linker script for x32 vDSO. 3 + * Linker script for 64-bit vDSO. 4 4 * We #include the file to define the layout details. 5 5 * 6 6 * This file defines the version script giving the user-exported symbols in 7 7 * the DSO. 8 8 */ 9 9 10 - #define BUILD_VDSOX32 10 + #define BUILD_VDSO64 11 11 12 - #include "vdso-layout.lds.S" 12 + #include "common/vdso-layout.lds.S" 13 13 14 14 /* 15 15 * This controls what userland symbols we export from the vDSO. ··· 17 17 VERSION { 18 18 LINUX_2.6 { 19 19 global: 20 + clock_gettime; 20 21 __vdso_clock_gettime; 22 + gettimeofday; 21 23 __vdso_gettimeofday; 24 + getcpu; 22 25 __vdso_getcpu; 26 + time; 23 27 __vdso_time; 28 + clock_getres; 24 29 __vdso_clock_getres; 30 + #ifdef CONFIG_X86_SGX 31 + __vdso_sgx_enter_enclave; 32 + #endif 33 + getrandom; 34 + __vdso_getrandom; 25 35 local: *; 26 36 }; 27 37 }
arch/x86/entry/vdso/vgetcpu.c arch/x86/entry/vdso/common/vgetcpu.c
arch/x86/entry/vdso/vgetrandom-chacha.S arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S
+1 -1
arch/x86/entry/vdso/vgetrandom.c arch/x86/entry/vdso/vdso64/vgetrandom.c
··· 4 4 */ 5 5 #include <linux/types.h> 6 6 7 - #include "../../../../lib/vdso/getrandom.c" 7 + #include "lib/vdso/getrandom.c" 8 8 9 9 ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) 10 10 {
+9 -13
arch/x86/entry/vdso/vma.c
··· 65 65 static void vdso_fix_landing(const struct vdso_image *image, 66 66 struct vm_area_struct *new_vma) 67 67 { 68 - if (in_ia32_syscall() && image == &vdso_image_32) { 69 - struct pt_regs *regs = current_pt_regs(); 70 - unsigned long vdso_land = image->sym_int80_landing_pad; 71 - unsigned long old_land_addr = vdso_land + 72 - (unsigned long)current->mm->context.vdso; 68 + struct pt_regs *regs = current_pt_regs(); 69 + unsigned long ipoffset = regs->ip - 70 + (unsigned long)current->mm->context.vdso; 73 71 74 - /* Fixing userspace landing - look at do_fast_syscall_32 */ 75 - if (regs->ip == old_land_addr) 76 - regs->ip = new_vma->vm_start + vdso_land; 77 - } 72 + if (ipoffset < image->size) 73 + regs->ip = new_vma->vm_start + ipoffset; 78 74 } 79 75 80 76 static int vdso_mremap(const struct vm_special_mapping *sm, ··· 226 230 if (vdso32_enabled != 1) /* Other values all mean "disabled" */ 227 231 return 0; 228 232 229 - return map_vdso(&vdso_image_32, 0); 233 + return map_vdso(&vdso32_image, 0); 230 234 } 231 235 232 236 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ··· 235 239 if (!vdso64_enabled) 236 240 return 0; 237 241 238 - return map_vdso(&vdso_image_64, 0); 242 + return map_vdso(&vdso64_image, 0); 239 243 } 240 244 241 245 return load_vdso32(); ··· 248 252 if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) { 249 253 if (!vdso64_enabled) 250 254 return 0; 251 - return map_vdso(&vdso_image_x32, 0); 255 + return map_vdso(&vdsox32_image, 0); 252 256 } 253 257 254 258 if (IS_ENABLED(CONFIG_IA32_EMULATION)) ··· 263 267 const struct vdso_image *image = current->mm->context.vdso_image; 264 268 unsigned long vdso = (unsigned long) current->mm->context.vdso; 265 269 266 - if (in_ia32_syscall() && image == &vdso_image_32) { 270 + if (in_ia32_syscall() && image == &vdso32_image) { 267 271 if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad || 268 272 regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad) 269 273 return true;
arch/x86/entry/vdso/vsgx.S arch/x86/entry/vdso/vdso64/vsgx.S
+1 -1
arch/x86/include/asm/cpufeatures.h
··· 84 84 #define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */ 85 85 #define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */ 86 86 #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */ 87 - #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */ 87 + #define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */ 88 88 #define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */ 89 89 #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */ 90 90 #define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
+1
arch/x86/include/asm/dwarf2.h
··· 20 20 #define CFI_RESTORE_STATE .cfi_restore_state 21 21 #define CFI_UNDEFINED .cfi_undefined 22 22 #define CFI_ESCAPE .cfi_escape 23 + #define CFI_SIGNAL_FRAME .cfi_signal_frame 23 24 24 25 #ifndef BUILD_VDSO 25 26 /*
+1 -1
arch/x86/include/asm/elf.h
··· 361 361 362 362 #define VDSO_ENTRY \ 363 363 ((unsigned long)current->mm->context.vdso + \ 364 - vdso_image_32.sym___kernel_vsyscall) 364 + vdso32_image.sym___kernel_vsyscall) 365 365 366 366 struct linux_binprm; 367 367
+3 -3
arch/x86/include/asm/vdso.h
··· 27 27 long sym_vdso32_rt_sigreturn_landing_pad; 28 28 }; 29 29 30 - extern const struct vdso_image vdso_image_64; 31 - extern const struct vdso_image vdso_image_x32; 32 - extern const struct vdso_image vdso_image_32; 30 + extern const struct vdso_image vdso64_image; 31 + extern const struct vdso_image vdsox32_image; 32 + extern const struct vdso_image vdso32_image; 33 33 34 34 extern int __init init_vdso_image(const struct vdso_image *image); 35 35
+8 -100
arch/x86/include/asm/vdso/gettimeofday.h
··· 18 18 #include <asm/msr.h> 19 19 #include <asm/pvclock.h> 20 20 #include <clocksource/hyperv_timer.h> 21 + #include <asm/vdso/sys_call.h> 21 22 22 23 #define VDSO_HAS_TIME 1 23 24 ··· 54 53 __attribute__((visibility("hidden"))); 55 54 #endif 56 55 57 - #ifndef BUILD_VDSO32 58 - 59 56 static __always_inline 60 57 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 61 58 { 62 - long ret; 63 - 64 - asm ("syscall" : "=a" (ret), "=m" (*_ts) : 65 - "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) : 66 - "rcx", "r11"); 67 - 68 - return ret; 59 + return VDSO_SYSCALL2(clock_gettime,64,_clkid,_ts); 69 60 } 70 61 71 62 static __always_inline 72 63 long gettimeofday_fallback(struct __kernel_old_timeval *_tv, 73 64 struct timezone *_tz) 74 65 { 75 - long ret; 76 - 77 - asm("syscall" : "=a" (ret) : 78 - "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory"); 79 - 80 - return ret; 66 + return VDSO_SYSCALL2(gettimeofday,,_tv,_tz); 81 67 } 82 68 83 69 static __always_inline 84 70 long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 85 71 { 86 - long ret; 87 - 88 - asm ("syscall" : "=a" (ret), "=m" (*_ts) : 89 - "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) : 90 - "rcx", "r11"); 91 - 92 - return ret; 72 + return VDSO_SYSCALL2(clock_getres,_time64,_clkid,_ts); 93 73 } 94 74 95 - #else 96 - 97 - static __always_inline 98 - long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 99 - { 100 - long ret; 101 - 102 - asm ( 103 - "mov %%ebx, %%edx \n" 104 - "mov %[clock], %%ebx \n" 105 - "call __kernel_vsyscall \n" 106 - "mov %%edx, %%ebx \n" 107 - : "=a" (ret), "=m" (*_ts) 108 - : "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts) 109 - : "edx"); 110 - 111 - return ret; 112 - } 75 + #ifndef CONFIG_X86_64 113 76 114 77 static __always_inline 115 78 long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) 116 79 { 117 - long ret; 118 - 119 - asm ( 120 - "mov %%ebx, %%edx \n" 121 - "mov %[clock], %%ebx \n" 122 - "call __kernel_vsyscall \n" 123 - "mov %%edx, %%ebx \n" 124 - : "=a" (ret), "=m" (*_ts) 125 - : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) 126 - : "edx"); 127 - 128 - return ret; 129 - } 130 - 131 - static __always_inline 132 - long gettimeofday_fallback(struct __kernel_old_timeval *_tv, 133 - struct timezone *_tz) 134 - { 135 - long ret; 136 - 137 - asm( 138 - "mov %%ebx, %%edx \n" 139 - "mov %2, %%ebx \n" 140 - "call __kernel_vsyscall \n" 141 - "mov %%edx, %%ebx \n" 142 - : "=a" (ret) 143 - : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz) 144 - : "memory", "edx"); 145 - 146 - return ret; 80 + return VDSO_SYSCALL2(clock_gettime,,_clkid,_ts); 147 81 } 148 82 149 83 static __always_inline long 150 - clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) 84 + clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) 151 85 { 152 - long ret; 153 - 154 - asm ( 155 - "mov %%ebx, %%edx \n" 156 - "mov %[clock], %%ebx \n" 157 - "call __kernel_vsyscall \n" 158 - "mov %%edx, %%ebx \n" 159 - : "=a" (ret), "=m" (*_ts) 160 - : "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts) 161 - : "edx"); 162 - 163 - return ret; 164 - } 165 - 166 - static __always_inline 167 - long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) 168 - { 169 - long ret; 170 - 171 - asm ( 172 - "mov %%ebx, %%edx \n" 173 - "mov %[clock], %%ebx \n" 174 - "call __kernel_vsyscall \n" 175 - "mov %%edx, %%ebx \n" 176 - : "=a" (ret), "=m" (*_ts) 177 - : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) 178 - : "edx"); 179 - 180 - return ret; 86 + return VDSO_SYSCALL2(clock_getres,,_clkid,_ts); 181 87 } 182 88 183 89 #endif
+105
arch/x86/include/asm/vdso/sys_call.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Macros for issuing an inline system call from the vDSO. 4 + */ 5 + 6 + #ifndef X86_ASM_VDSO_SYS_CALL_H 7 + #define X86_ASM_VDSO_SYS_CALL_H 8 + 9 + #include <linux/compiler.h> 10 + #include <asm/cpufeatures.h> 11 + #include <asm/alternative.h> 12 + 13 + #ifdef CONFIG_X86_64 14 + # define __sys_instr "syscall" 15 + # define __sys_clobber "rcx", "r11", "memory" 16 + # define __sys_nr(x,y) __NR_ ## x 17 + # define __sys_reg1 "rdi" 18 + # define __sys_reg2 "rsi" 19 + # define __sys_reg3 "rdx" 20 + # define __sys_reg4 "r10" 21 + # define __sys_reg5 "r8" 22 + #else 23 + # define __sys_instr ALTERNATIVE("ds;ds;ds;int $0x80", \ 24 + "call __kernel_vsyscall", \ 25 + X86_FEATURE_SYSFAST32) 26 + # define __sys_clobber "memory" 27 + # define __sys_nr(x,y) __NR_ ## x ## y 28 + # define __sys_reg1 "ebx" 29 + # define __sys_reg2 "ecx" 30 + # define __sys_reg3 "edx" 31 + # define __sys_reg4 "esi" 32 + # define __sys_reg5 "edi" 33 + #endif 34 + 35 + /* 36 + * Example usage: 37 + * 38 + * result = VDSO_SYSCALL3(foo,64,x,y,z); 39 + * 40 + * ... calls foo(x,y,z) on 64 bits, and foo64(x,y,z) on 32 bits. 41 + * 42 + * VDSO_SYSCALL6() is currently missing, because it would require 43 + * special handling for %ebp on 32 bits when the vdso is compiled with 44 + * frame pointers enabled (the default on 32 bits.) Add it as a special 45 + * case when and if it becomes necessary. 46 + */ 47 + #define _VDSO_SYSCALL(name,suf32,...) \ 48 + ({ \ 49 + long _sys_num_ret = __sys_nr(name,suf32); \ 50 + asm_inline volatile( \ 51 + __sys_instr \ 52 + : "+a" (_sys_num_ret) \ 53 + : __VA_ARGS__ \ 54 + : __sys_clobber); \ 55 + _sys_num_ret; \ 56 + }) 57 + 58 + #define VDSO_SYSCALL0(name,suf32) \ 59 + _VDSO_SYSCALL(name,suf32) 60 + #define VDSO_SYSCALL1(name,suf32,a1) \ 61 + ({ \ 62 + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ 63 + _VDSO_SYSCALL(name,suf32, \ 64 + "r" (_sys_arg1)); \ 65 + }) 66 + #define VDSO_SYSCALL2(name,suf32,a1,a2) \ 67 + ({ \ 68 + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ 69 + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ 70 + _VDSO_SYSCALL(name,suf32, \ 71 + "r" (_sys_arg1), "r" (_sys_arg2)); \ 72 + }) 73 + #define VDSO_SYSCALL3(name,suf32,a1,a2,a3) \ 74 + ({ \ 75 + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ 76 + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ 77 + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ 78 + _VDSO_SYSCALL(name,suf32, \ 79 + "r" (_sys_arg1), "r" (_sys_arg2), \ 80 + "r" (_sys_arg3)); \ 81 + }) 82 + #define VDSO_SYSCALL4(name,suf32,a1,a2,a3,a4) \ 83 + ({ \ 84 + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ 85 + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ 86 + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ 87 + register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \ 88 + _VDSO_SYSCALL(name,suf32, \ 89 + "r" (_sys_arg1), "r" (_sys_arg2), \ 90 + "r" (_sys_arg3), "r" (_sys_arg4)); \ 91 + }) 92 + #define VDSO_SYSCALL5(name,suf32,a1,a2,a3,a4,a5) \ 93 + ({ \ 94 + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ 95 + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ 96 + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ 97 + register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \ 98 + register long _sys_arg5 asm(__sys_reg5) = (long)(a5); \ 99 + _VDSO_SYSCALL(name,suf32, \ 100 + "r" (_sys_arg1), "r" (_sys_arg2), \ 101 + "r" (_sys_arg3), "r" (_sys_arg4), \ 102 + "r" (_sys_arg5)); \ 103 + }) 104 + 105 + #endif /* X86_VDSO_SYS_CALL_H */
+6
arch/x86/kernel/asm-offsets.c
··· 63 63 OFFSET(IA32_SIGCONTEXT_bp, sigcontext_32, bp); 64 64 OFFSET(IA32_SIGCONTEXT_sp, sigcontext_32, sp); 65 65 OFFSET(IA32_SIGCONTEXT_ip, sigcontext_32, ip); 66 + OFFSET(IA32_SIGCONTEXT_es, sigcontext_32, es); 67 + OFFSET(IA32_SIGCONTEXT_cs, sigcontext_32, cs); 68 + OFFSET(IA32_SIGCONTEXT_ss, sigcontext_32, ss); 69 + OFFSET(IA32_SIGCONTEXT_ds, sigcontext_32, ds); 70 + OFFSET(IA32_SIGCONTEXT_flags, sigcontext_32, flags); 66 71 67 72 BLANK(); 73 + OFFSET(IA32_SIGFRAME_sigcontext, sigframe_ia32, sc); 68 74 OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); 69 75 #endif 70 76
-3
arch/x86/kernel/cpu/centaur.c
··· 102 102 (c->x86 >= 7)) 103 103 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 104 104 105 - #ifdef CONFIG_X86_64 106 - set_cpu_cap(c, X86_FEATURE_SYSENTER32); 107 - #endif 108 105 if (c->x86_power & (1 << 8)) { 109 106 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 110 107 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+8
arch/x86/kernel/cpu/common.c
··· 1068 1068 init_scattered_cpuid_features(c); 1069 1069 init_speculation_control(c); 1070 1070 1071 + if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP)) 1072 + set_cpu_cap(c, X86_FEATURE_SYSFAST32); 1073 + 1071 1074 /* 1072 1075 * Clear/Set all flags overridden by options, after probe. 1073 1076 * This needs to happen each time we re-probe, which may happen ··· 1816 1813 * that it can't be enabled in 32-bit mode. 1817 1814 */ 1818 1815 setup_clear_cpu_cap(X86_FEATURE_PCID); 1816 + 1817 + /* 1818 + * Never use SYSCALL on a 32-bit kernel 1819 + */ 1820 + setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); 1819 1821 #endif 1820 1822 1821 1823 /*
+1 -3
arch/x86/kernel/cpu/intel.c
··· 236 236 clear_cpu_cap(c, X86_FEATURE_PSE); 237 237 } 238 238 239 - #ifdef CONFIG_X86_64 240 - set_cpu_cap(c, X86_FEATURE_SYSENTER32); 241 - #else 239 + #ifndef CONFIG_X86_64 242 240 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ 243 241 if (c->x86 == 15 && c->x86_cache_alignment == 64) 244 242 c->x86_cache_alignment = 128;
+1 -3
arch/x86/kernel/cpu/zhaoxin.c
··· 59 59 { 60 60 if (c->x86 >= 0x6) 61 61 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 62 - #ifdef CONFIG_X86_64 63 - set_cpu_cap(c, X86_FEATURE_SYSENTER32); 64 - #endif 62 + 65 63 if (c->x86_power & (1 << 8)) { 66 64 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 67 65 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+1 -1
arch/x86/kernel/fred.c
··· 68 68 idt_invalidate(); 69 69 70 70 /* Use int $0x80 for 32-bit system calls in FRED mode */ 71 - setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); 71 + setup_clear_cpu_cap(X86_FEATURE_SYSFAST32); 72 72 setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); 73 73 } 74 74
+3 -3
arch/x86/kernel/process_64.c
··· 941 941 #ifdef CONFIG_CHECKPOINT_RESTORE 942 942 # ifdef CONFIG_X86_X32_ABI 943 943 case ARCH_MAP_VDSO_X32: 944 - return prctl_map_vdso(&vdso_image_x32, arg2); 944 + return prctl_map_vdso(&vdsox32_image, arg2); 945 945 # endif 946 946 # ifdef CONFIG_IA32_EMULATION 947 947 case ARCH_MAP_VDSO_32: 948 - return prctl_map_vdso(&vdso_image_32, arg2); 948 + return prctl_map_vdso(&vdso32_image, arg2); 949 949 # endif 950 950 case ARCH_MAP_VDSO_64: 951 - return prctl_map_vdso(&vdso_image_64, arg2); 951 + return prctl_map_vdso(&vdso64_image, arg2); 952 952 #endif 953 953 #ifdef CONFIG_ADDRESS_MASKING 954 954 case ARCH_GET_UNTAG_MASK:
+2 -2
arch/x86/kernel/signal_32.c
··· 282 282 /* Return stub is in 32bit vsyscall page */ 283 283 if (current->mm->context.vdso) 284 284 restorer = current->mm->context.vdso + 285 - vdso_image_32.sym___kernel_sigreturn; 285 + vdso32_image.sym___kernel_sigreturn; 286 286 else 287 287 restorer = &frame->retcode; 288 288 } ··· 368 368 restorer = ksig->ka.sa.sa_restorer; 369 369 else 370 370 restorer = current->mm->context.vdso + 371 - vdso_image_32.sym___kernel_rt_sigreturn; 371 + vdso32_image.sym___kernel_rt_sigreturn; 372 372 unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault); 373 373 374 374 /*
+1
arch/x86/tools/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 relocs 3 + vdso2c
+10 -5
arch/x86/tools/Makefile
··· 38 38 39 39 $(obj)/insn_sanity.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c 40 40 41 - HOST_EXTRACFLAGS += -I$(srctree)/tools/include 42 - hostprogs += relocs 43 - relocs-objs := relocs_32.o relocs_64.o relocs_common.o 44 - PHONY += relocs 45 - relocs: $(obj)/relocs 41 + HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi \ 42 + -I$(srctree)/arch/$(SUBARCH)/include/uapi 43 + 44 + hostprogs += relocs vdso2c 45 + relocs-objs := relocs_32.o relocs_64.o relocs_common.o 46 + 47 + always-y := $(hostprogs) 48 + 49 + PHONY += $(hostprogs) 50 + $(hostprogs): %: $(obj)/% 46 51 @:
+18 -10
arch/x86/xen/setup.c
··· 990 990 return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); 991 991 } 992 992 993 - void xen_enable_sysenter(void) 994 - { 995 - if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) && 996 - register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat)) 997 - setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); 998 - } 999 - 1000 993 void xen_enable_syscall(void) 1001 994 { 1002 995 int ret; ··· 1001 1008 mechanism for syscalls. */ 1002 1009 } 1003 1010 1004 - if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) && 1005 - register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat)) 1011 + if (!cpu_feature_enabled(X86_FEATURE_SYSFAST32)) 1012 + return; 1013 + 1014 + if (cpu_feature_enabled(X86_FEATURE_SYSCALL32)) { 1015 + /* Use SYSCALL32 */ 1016 + ret = register_callback(CALLBACKTYPE_syscall32, 1017 + xen_entry_SYSCALL_compat); 1018 + 1019 + } else { 1020 + /* Use SYSENTER32 */ 1021 + ret = register_callback(CALLBACKTYPE_sysenter, 1022 + xen_entry_SYSENTER_compat); 1023 + } 1024 + 1025 + if (ret) { 1006 1026 setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); 1027 + setup_clear_cpu_cap(X86_FEATURE_SYSFAST32); 1028 + } 1007 1029 } 1030 + 1008 1031 1009 1032 static void __init xen_pvmmu_arch_setup(void) 1010 1033 { ··· 1031 1022 register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback)) 1032 1023 BUG(); 1033 1024 1034 - xen_enable_sysenter(); 1035 1025 xen_enable_syscall(); 1036 1026 } 1037 1027
+2 -3
arch/x86/xen/smp_pv.c
··· 65 65 touch_softlockup_watchdog(); 66 66 67 67 /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */ 68 - if (!xen_feature(XENFEAT_supervisor_mode_kernel)) { 69 - xen_enable_sysenter(); 68 + if (!xen_feature(XENFEAT_supervisor_mode_kernel)) 70 69 xen_enable_syscall(); 71 - } 70 + 72 71 cpu = smp_processor_id(); 73 72 identify_secondary_cpu(cpu); 74 73 set_cpu_sibling_map(cpu);
-1
arch/x86/xen/xen-ops.h
··· 60 60 char * __init xen_memory_setup(void); 61 61 void __init xen_arch_setup(void); 62 62 void xen_banner(void); 63 - void xen_enable_sysenter(void); 64 63 void xen_enable_syscall(void); 65 64 void xen_vcpu_restore(void); 66 65
+1 -1
tools/testing/selftests/vDSO/vgetrandom-chacha.S
··· 16 16 #elif defined(__s390x__) 17 17 #include "../../../../arch/s390/kernel/vdso/vgetrandom-chacha.S" 18 18 #elif defined(__x86_64__) 19 - #include "../../../../arch/x86/entry/vdso/vgetrandom-chacha.S" 19 + #include "../../../../arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S" 20 20 #endif