Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"A number of fixes and some late updates:

- make in_compat_syscall() behavior on x86-32 similar to other
platforms; this touches a number of generic files but is not
intended to impact non-x86 platforms.

- objtool fixes

- PAT preemption fix

- paravirt fixes/cleanups

- cpufeatures updates for new instructions

- earlyprintk quirk

- make microcode version in sysfs world-readable (it is already
world-readable in procfs)

- minor cleanups and fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
compat: Cleanup in_compat_syscall() callers
x86/compat: Adjust in_compat_syscall() to generic code under !COMPAT
objtool: Support GCC 9 cold subfunction naming scheme
x86/numa_emulation: Fix uniform-split numa emulation
x86/paravirt: Remove unused _paravirt_ident_32
x86/mm/pat: Disable preemption around __flush_tlb_all()
x86/paravirt: Remove GPL from pv_ops export
x86/traps: Use format string with panic() call
x86: Clean up 'sizeof x' => 'sizeof(x)'
x86/cpufeatures: Enumerate MOVDIR64B instruction
x86/cpufeatures: Enumerate MOVDIRI instruction
x86/earlyprintk: Add a force option for pciserial device
objtool: Support per-function rodata sections
x86/microcode: Make revision and processor flags world-readable

+198 -171
+5 -1
Documentation/admin-guide/kernel-parameters.txt
··· 1068 1068 earlyprintk=serial[,0x...[,baudrate]] 1069 1069 earlyprintk=ttySn[,baudrate] 1070 1070 earlyprintk=dbgp[debugController#] 1071 - earlyprintk=pciserial,bus:device.function[,baudrate] 1071 + earlyprintk=pciserial[,force],bus:device.function[,baudrate] 1072 1072 earlyprintk=xdbc[xhciController#] 1073 1073 1074 1074 earlyprintk is useful when the kernel crashes before ··· 1099 1099 The xen output can only be used by Xen PV guests. 1100 1100 1101 1101 The sclp output can only be used on s390. 1102 + 1103 + The optional "force" to "pciserial" enables use of a 1104 + PCI device even when its classcode is not of the 1105 + UART class. 1102 1106 1103 1107 edac_report= [HW,EDAC] Control how to report EDAC event 1104 1108 Format: {"on" | "off" | "force"}
+1 -1
arch/x86/boot/cpucheck.c
··· 113 113 { 114 114 int err; 115 115 116 - memset(&cpu.flags, 0, sizeof cpu.flags); 116 + memset(&cpu.flags, 0, sizeof(cpu.flags)); 117 117 cpu.level = 3; 118 118 119 119 if (has_eflag(X86_EFLAGS_AC))
+2 -2
arch/x86/boot/early_serial_console.c
··· 50 50 int pos = 0; 51 51 int port = 0; 52 52 53 - if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) { 53 + if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) { 54 54 char *e; 55 55 56 56 if (!strncmp(arg, "serial", 6)) { ··· 124 124 * console=uart8250,io,0x3f8,115200n8 125 125 * need to make sure it is last one console ! 126 126 */ 127 - if (cmdline_find_option("console", optstr, sizeof optstr) <= 0) 127 + if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0) 128 128 return; 129 129 130 130 options = optstr;
+3 -3
arch/x86/boot/edd.c
··· 76 76 { 77 77 struct biosregs ireg, oreg; 78 78 79 - memset(ei, 0, sizeof *ei); 79 + memset(ei, 0, sizeof(*ei)); 80 80 81 81 /* Check Extensions Present */ 82 82 ··· 133 133 struct edd_info ei, *edp; 134 134 u32 *mbrptr; 135 135 136 - if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { 136 + if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) { 137 137 if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { 138 138 do_edd = 1; 139 139 do_mbr = 0; ··· 166 166 */ 167 167 if (!get_edd_info(devno, &ei) 168 168 && boot_params.eddbuf_entries < EDDMAXNR) { 169 - memcpy(edp, &ei, sizeof ei); 169 + memcpy(edp, &ei, sizeof(ei)); 170 170 edp++; 171 171 boot_params.eddbuf_entries++; 172 172 }
+2 -2
arch/x86/boot/main.c
··· 36 36 const struct old_cmdline * const oldcmd = 37 37 (const struct old_cmdline *)OLD_CL_ADDRESS; 38 38 39 - BUILD_BUG_ON(sizeof boot_params != 4096); 40 - memcpy(&boot_params.hdr, &hdr, sizeof hdr); 39 + BUILD_BUG_ON(sizeof(boot_params) != 4096); 40 + memcpy(&boot_params.hdr, &hdr, sizeof(hdr)); 41 41 42 42 if (!boot_params.hdr.cmd_line_ptr && 43 43 oldcmd->cl_magic == OLD_CL_MAGIC) {
+1 -1
arch/x86/boot/memory.c
··· 26 26 27 27 initregs(&ireg); 28 28 ireg.ax = 0xe820; 29 - ireg.cx = sizeof buf; 29 + ireg.cx = sizeof(buf); 30 30 ireg.edx = SMAP; 31 31 ireg.di = (size_t)&buf; 32 32
+1 -1
arch/x86/boot/regs.c
··· 21 21 22 22 void initregs(struct biosregs *reg) 23 23 { 24 - memset(reg, 0, sizeof *reg); 24 + memset(reg, 0, sizeof(*reg)); 25 25 reg->eflags |= X86_EFLAGS_CF; 26 26 reg->ds = ds(); 27 27 reg->es = ds();
+3 -3
arch/x86/boot/video-vesa.c
··· 62 62 if (mode & ~0x1ff) 63 63 continue; 64 64 65 - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ 65 + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ 66 66 67 67 ireg.ax = 0x4f01; 68 68 ireg.cx = mode; ··· 109 109 int is_graphic; 110 110 u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA; 111 111 112 - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ 112 + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ 113 113 114 114 initregs(&ireg); 115 115 ireg.ax = 0x4f01; ··· 241 241 struct biosregs ireg, oreg; 242 242 243 243 /* Apparently used as a nonsense token... */ 244 - memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info); 244 + memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info)); 245 245 246 246 if (vginfo.version < 0x0200) 247 247 return; /* EDID requires VBE 2.0+ */
+1 -1
arch/x86/boot/video.c
··· 115 115 } else if ((key >= '0' && key <= '9') || 116 116 (key >= 'A' && key <= 'Z') || 117 117 (key >= 'a' && key <= 'z')) { 118 - if (len < sizeof entry_buf) { 118 + if (len < sizeof(entry_buf)) { 119 119 entry_buf[len++] = key; 120 120 putchar(key); 121 121 }
+1 -1
arch/x86/events/intel/core.c
··· 4535 4535 } 4536 4536 } 4537 4537 4538 - snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); 4538 + snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); 4539 4539 4540 4540 if (version >= 2 && extra_attr) { 4541 4541 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+8 -1
arch/x86/include/asm/compat.h
··· 217 217 return false; 218 218 } 219 219 220 - static inline bool in_compat_syscall(void) 220 + static inline bool in_32bit_syscall(void) 221 221 { 222 222 return in_ia32_syscall() || in_x32_syscall(); 223 223 } 224 + 225 + #ifdef CONFIG_COMPAT 226 + static inline bool in_compat_syscall(void) 227 + { 228 + return in_32bit_syscall(); 229 + } 224 230 #define in_compat_syscall in_compat_syscall /* override the generic impl */ 231 + #endif 225 232 226 233 struct compat_siginfo; 227 234 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+2
arch/x86/include/asm/cpufeatures.h
··· 331 331 #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 332 332 #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 333 333 #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ 334 + #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ 335 + #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ 334 336 335 337 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 336 338 #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
+1 -3
arch/x86/include/asm/ftrace.h
··· 76 76 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 77 77 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) 78 78 { 79 - if (in_compat_syscall()) 80 - return true; 81 - return false; 79 + return in_32bit_syscall(); 82 80 } 83 81 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ 84 82 #endif /* !COMPILE_OFFSETS */
-2
arch/x86/include/asm/paravirt_types.h
··· 361 361 __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ 362 362 asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) 363 363 364 - unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); 365 364 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); 366 365 unsigned paravirt_patch_default(u8 type, void *insnbuf, 367 366 unsigned long addr, unsigned len); ··· 650 651 void paravirt_flush_lazy_mmu(void); 651 652 652 653 void _paravirt_nop(void); 653 - u32 _paravirt_ident_32(u32); 654 654 u64 _paravirt_ident_64(u64); 655 655 656 656 #define paravirt_nop ((void *)_paravirt_nop)
+6
arch/x86/include/asm/tlbflush.h
··· 453 453 */ 454 454 static inline void __flush_tlb_all(void) 455 455 { 456 + /* 457 + * This is to catch users with enabled preemption and the PGE feature 458 + * and don't trigger the warning in __native_flush_tlb(). 459 + */ 460 + VM_WARN_ON_ONCE(preemptible()); 461 + 456 462 if (boot_cpu_has(X86_FEATURE_PGE)) { 457 463 __flush_tlb_global(); 458 464 } else {
+2 -2
arch/x86/kernel/cpu/common.c
··· 1074 1074 #endif 1075 1075 c->x86_cache_alignment = c->x86_clflush_size; 1076 1076 1077 - memset(&c->x86_capability, 0, sizeof c->x86_capability); 1077 + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1078 1078 c->extended_cpuid_level = 0; 1079 1079 1080 1080 if (!have_cpuid_p()) ··· 1317 1317 c->x86_virt_bits = 32; 1318 1318 #endif 1319 1319 c->x86_cache_alignment = c->x86_clflush_size; 1320 - memset(&c->x86_capability, 0, sizeof c->x86_capability); 1320 + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1321 1321 1322 1322 generic_identify(c); 1323 1323
+1 -1
arch/x86/kernel/cpu/mcheck/mce.c
··· 2215 2215 if (dev) 2216 2216 return 0; 2217 2217 2218 - dev = kzalloc(sizeof *dev, GFP_KERNEL); 2218 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2219 2219 if (!dev) 2220 2220 return -ENOMEM; 2221 2221 dev->id = cpu;
+2 -2
arch/x86/kernel/cpu/microcode/core.c
··· 666 666 } 667 667 668 668 static DEVICE_ATTR_WO(reload); 669 - static DEVICE_ATTR(version, 0400, version_show, NULL); 670 - static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); 669 + static DEVICE_ATTR(version, 0444, version_show, NULL); 670 + static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); 671 671 672 672 static struct attribute *mc_default_attrs[] = { 673 673 &dev_attr_version.attr,
+1 -1
arch/x86/kernel/cpu/mtrr/generic.c
··· 798 798 local_irq_restore(flags); 799 799 800 800 /* Use the atomic bitops to update the global mask */ 801 - for (count = 0; count < sizeof mask * 8; ++count) { 801 + for (count = 0; count < sizeof(mask) * 8; ++count) { 802 802 if (mask & 0x01) 803 803 set_bit(count, &smp_changes_mask); 804 804 mask >>= 1;
+3 -3
arch/x86/kernel/cpu/mtrr/if.c
··· 174 174 case MTRRIOC_SET_PAGE_ENTRY: 175 175 case MTRRIOC_DEL_PAGE_ENTRY: 176 176 case MTRRIOC_KILL_PAGE_ENTRY: 177 - if (copy_from_user(&sentry, arg, sizeof sentry)) 177 + if (copy_from_user(&sentry, arg, sizeof(sentry))) 178 178 return -EFAULT; 179 179 break; 180 180 case MTRRIOC_GET_ENTRY: 181 181 case MTRRIOC_GET_PAGE_ENTRY: 182 - if (copy_from_user(&gentry, arg, sizeof gentry)) 182 + if (copy_from_user(&gentry, arg, sizeof(gentry))) 183 183 return -EFAULT; 184 184 break; 185 185 #ifdef CONFIG_COMPAT ··· 332 332 switch (cmd) { 333 333 case MTRRIOC_GET_ENTRY: 334 334 case MTRRIOC_GET_PAGE_ENTRY: 335 - if (copy_to_user(arg, &gentry, sizeof gentry)) 335 + if (copy_to_user(arg, &gentry, sizeof(gentry))) 336 336 err = -EFAULT; 337 337 break; 338 338 #ifdef CONFIG_COMPAT
+19 -10
arch/x86/kernel/early_printk.c
··· 213 213 * early_pci_serial_init() 214 214 * 215 215 * This function is invoked when the early_printk param starts with "pciserial" 216 - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the 217 - * location of a PCI device that must be a UART device. 216 + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe 217 + * the location of a PCI device that must be a UART device. "force" is optional 218 + * and overrides the use of an UART device with a wrong PCI class code. 218 219 */ 219 220 static __init void early_pci_serial_init(char *s) 220 221 { ··· 225 224 u32 classcode, bar0; 226 225 u16 cmdreg; 227 226 char *e; 227 + int force = 0; 228 228 229 - 230 - /* 231 - * First, part the param to get the BDF values 232 - */ 233 229 if (*s == ',') 234 230 ++s; 235 231 236 232 if (*s == 0) 237 233 return; 238 234 235 + /* Force the use of an UART device with wrong class code */ 236 + if (!strncmp(s, "force,", 6)) { 237 + force = 1; 238 + s += 6; 239 + } 240 + 241 + /* 242 + * Part the param to get the BDF values 243 + */ 239 244 bus = (u8)simple_strtoul(s, &e, 16); 240 245 s = e; 241 246 if (*s != ':') ··· 260 253 s++; 261 254 262 255 /* 263 - * Second, find the device from the BDF 256 + * Find the device from the BDF 264 257 */ 265 258 cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); 266 259 classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); ··· 271 264 */ 272 265 if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && 273 266 (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || 274 - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ 275 - return; 267 + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { 268 + if (!force) 269 + return; 270 + } 276 271 277 272 /* 278 273 * Determine if it is IO or memory mapped ··· 298 289 } 299 290 300 291 /* 301 - * Lastly, initialize the hardware 292 + * Initialize the hardware 302 293 */ 303 294 if (*s) { 304 295 if (strcmp(s, "nocfg") 
== 0)
+1 -1
arch/x86/kernel/head64.c
··· 385 385 */ 386 386 sme_map_bootdata(real_mode_data); 387 387 388 - memcpy(&boot_params, real_mode_data, sizeof boot_params); 388 + memcpy(&boot_params, real_mode_data, sizeof(boot_params)); 389 389 sanitize_boot_params(&boot_params); 390 390 cmd_line_ptr = get_cmd_line_ptr(); 391 391 if (cmd_line_ptr) {
+4 -4
arch/x86/kernel/msr.c
··· 115 115 err = -EBADF; 116 116 break; 117 117 } 118 - if (copy_from_user(&regs, uregs, sizeof regs)) { 118 + if (copy_from_user(&regs, uregs, sizeof(regs))) { 119 119 err = -EFAULT; 120 120 break; 121 121 } 122 122 err = rdmsr_safe_regs_on_cpu(cpu, regs); 123 123 if (err) 124 124 break; 125 - if (copy_to_user(uregs, &regs, sizeof regs)) 125 + if (copy_to_user(uregs, &regs, sizeof(regs))) 126 126 err = -EFAULT; 127 127 break; 128 128 ··· 131 131 err = -EBADF; 132 132 break; 133 133 } 134 - if (copy_from_user(&regs, uregs, sizeof regs)) { 134 + if (copy_from_user(&regs, uregs, sizeof(regs))) { 135 135 err = -EFAULT; 136 136 break; 137 137 } 138 138 err = wrmsr_safe_regs_on_cpu(cpu, regs); 139 139 if (err) 140 140 break; 141 - if (copy_to_user(uregs, &regs, sizeof regs)) 141 + if (copy_to_user(uregs, &regs, sizeof(regs))) 142 142 err = -EFAULT; 143 143 break; 144 144
+8 -20
arch/x86/kernel/paravirt.c
··· 56 56 ".type _paravirt_nop, @function\n\t" 57 57 ".popsection"); 58 58 59 - /* identity function, which can be inlined */ 60 - u32 notrace _paravirt_ident_32(u32 x) 61 - { 62 - return x; 63 - } 64 - 65 - u64 notrace _paravirt_ident_64(u64 x) 66 - { 67 - return x; 68 - } 69 - 70 59 void __init default_banner(void) 71 60 { 72 61 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", ··· 91 102 } 92 103 93 104 #ifdef CONFIG_PARAVIRT_XXL 105 + /* identity function, which can be inlined */ 106 + u64 notrace _paravirt_ident_64(u64 x) 107 + { 108 + return x; 109 + } 110 + 94 111 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, 95 112 unsigned long addr, unsigned len) 96 113 { ··· 141 146 else if (opfunc == _paravirt_nop) 142 147 ret = 0; 143 148 149 + #ifdef CONFIG_PARAVIRT_XXL 144 150 /* identity functions just return their single argument */ 145 - else if (opfunc == _paravirt_ident_32) 146 - ret = paravirt_patch_ident_32(insnbuf, len); 147 151 else if (opfunc == _paravirt_ident_64) 148 152 ret = paravirt_patch_ident_64(insnbuf, len); 149 153 150 - #ifdef CONFIG_PARAVIRT_XXL 151 154 else if (type == PARAVIRT_PATCH(cpu.iret) || 152 155 type == PARAVIRT_PATCH(cpu.usergs_sysret64)) 153 156 /* If operation requires a jmp, then jmp */ ··· 302 309 #endif 303 310 }; 304 311 305 - #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) 306 - /* 32-bit pagetable entries */ 307 - #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) 308 - #else 309 312 /* 64-bit pagetable entries */ 310 313 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) 311 - #endif 312 314 313 315 struct paravirt_patch_template pv_ops = { 314 316 /* Init ops. */ ··· 471 483 NOKPROBE_SYMBOL(native_load_idt); 472 484 #endif 473 485 474 - EXPORT_SYMBOL_GPL(pv_ops); 486 + EXPORT_SYMBOL(pv_ops); 475 487 EXPORT_SYMBOL_GPL(pv_info);
+6 -12
arch/x86/kernel/paravirt_patch_32.c
··· 10 10 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); 11 11 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); 12 12 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); 13 - #endif 14 - 15 - #if defined(CONFIG_PARAVIRT_SPINLOCKS) 16 - DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); 17 - DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); 18 - #endif 19 - 20 - unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) 21 - { 22 - /* arg in %eax, return in %eax */ 23 - return 0; 24 - } 25 13 26 14 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) 27 15 { 28 16 /* arg in %edx:%eax, return in %edx:%eax */ 29 17 return 0; 30 18 } 19 + #endif 20 + 21 + #if defined(CONFIG_PARAVIRT_SPINLOCKS) 22 + DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); 23 + DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); 24 + #endif 31 25 32 26 extern bool pv_is_native_spin_unlock(void); 33 27 extern bool pv_is_native_vcpu_is_preempted(void);
+6 -14
arch/x86/kernel/paravirt_patch_64.c
··· 15 15 16 16 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); 17 17 DEF_NATIVE(cpu, swapgs, "swapgs"); 18 - #endif 19 - 20 - DEF_NATIVE(, mov32, "mov %edi, %eax"); 21 18 DEF_NATIVE(, mov64, "mov %rdi, %rax"); 22 - 23 - #if defined(CONFIG_PARAVIRT_SPINLOCKS) 24 - DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); 25 - DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); 26 - #endif 27 - 28 - unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) 29 - { 30 - return paravirt_patch_insns(insnbuf, len, 31 - start__mov32, end__mov32); 32 - } 33 19 34 20 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) 35 21 { 36 22 return paravirt_patch_insns(insnbuf, len, 37 23 start__mov64, end__mov64); 38 24 } 25 + #endif 26 + 27 + #if defined(CONFIG_PARAVIRT_SPINLOCKS) 28 + DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); 29 + DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); 30 + #endif 39 31 40 32 extern bool pv_is_native_spin_unlock(void); 41 33 extern bool pv_is_native_vcpu_is_preempted(void);
+2 -2
arch/x86/kernel/process_64.c
··· 701 701 current->mm->context.ia32_compat = TIF_X32; 702 702 current->personality &= ~READ_IMPLIES_EXEC; 703 703 /* 704 - * in_compat_syscall() uses the presence of the x32 syscall bit 704 + * in_32bit_syscall() uses the presence of the x32 syscall bit 705 705 * flag to determine compat status. The x86 mmap() code relies on 706 706 * the syscall bitness so set x32 syscall bit right here to make 707 - * in_compat_syscall() work during exec(). 707 + * in_32bit_syscall() work during exec(). 708 708 * 709 709 * Pretend to come from a x32 execve. 710 710 */
+6 -5
arch/x86/kernel/sys_x86_64.c
··· 105 105 static void find_start_end(unsigned long addr, unsigned long flags, 106 106 unsigned long *begin, unsigned long *end) 107 107 { 108 - if (!in_compat_syscall() && (flags & MAP_32BIT)) { 108 + if (!in_32bit_syscall() && (flags & MAP_32BIT)) { 109 109 /* This is usually used needed to map code in small 110 110 model, so it needs to be in the first 31bit. Limit 111 111 it to that. This means we need to move the ··· 122 122 } 123 123 124 124 *begin = get_mmap_base(1); 125 - if (in_compat_syscall()) 125 + if (in_32bit_syscall()) 126 126 *end = task_size_32bit(); 127 127 else 128 128 *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW); ··· 193 193 return addr; 194 194 195 195 /* for MAP_32BIT mappings we force the legacy mmap base */ 196 - if (!in_compat_syscall() && (flags & MAP_32BIT)) 196 + if (!in_32bit_syscall() && (flags & MAP_32BIT)) 197 197 goto bottomup; 198 198 199 199 /* requesting a specific address */ ··· 217 217 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area 218 218 * in the full address space. 219 219 * 220 - * !in_compat_syscall() check to avoid high addresses for x32. 220 + * !in_32bit_syscall() check to avoid high addresses for x32 221 + * (and make it no op on native i386). 221 222 */ 222 - if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall()) 223 + if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) 223 224 info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; 224 225 225 226 info.align_mask = 0;
+1 -1
arch/x86/kernel/traps.c
··· 306 306 die(message, regs, 0); 307 307 308 308 /* Be absolutely certain we don't return. */ 309 - panic(message); 309 + panic("%s", message); 310 310 } 311 311 #endif 312 312
+11 -11
arch/x86/kvm/emulate.c
··· 1509 1509 return emulate_gp(ctxt, index << 3 | 0x2); 1510 1510 1511 1511 addr = dt.address + index * 8; 1512 - return linear_read_system(ctxt, addr, desc, sizeof *desc); 1512 + return linear_read_system(ctxt, addr, desc, sizeof(*desc)); 1513 1513 } 1514 1514 1515 1515 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, ··· 1522 1522 struct desc_struct desc; 1523 1523 u16 sel; 1524 1524 1525 - memset (dt, 0, sizeof *dt); 1525 + memset(dt, 0, sizeof(*dt)); 1526 1526 if (!ops->get_segment(ctxt, &sel, &desc, &base3, 1527 1527 VCPU_SREG_LDTR)) 1528 1528 return; ··· 1586 1586 if (rc != X86EMUL_CONTINUE) 1587 1587 return rc; 1588 1588 1589 - return linear_write_system(ctxt, addr, desc, sizeof *desc); 1589 + return linear_write_system(ctxt, addr, desc, sizeof(*desc)); 1590 1590 } 1591 1591 1592 1592 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, ··· 1604 1604 u16 dummy; 1605 1605 u32 base3 = 0; 1606 1606 1607 - memset(&seg_desc, 0, sizeof seg_desc); 1607 + memset(&seg_desc, 0, sizeof(seg_desc)); 1608 1608 1609 1609 if (ctxt->mode == X86EMUL_MODE_REAL) { 1610 1610 /* set real mode segment descriptor (keep limit etc. 
for ··· 3075 3075 int ret; 3076 3076 u32 new_tss_base = get_desc_base(new_desc); 3077 3077 3078 - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3078 + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3079 3079 if (ret != X86EMUL_CONTINUE) 3080 3080 return ret; 3081 3081 3082 3082 save_state_to_tss16(ctxt, &tss_seg); 3083 3083 3084 - ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3084 + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3085 3085 if (ret != X86EMUL_CONTINUE) 3086 3086 return ret; 3087 3087 3088 - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); 3088 + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3089 3089 if (ret != X86EMUL_CONTINUE) 3090 3090 return ret; 3091 3091 ··· 3094 3094 3095 3095 ret = linear_write_system(ctxt, new_tss_base, 3096 3096 &tss_seg.prev_task_link, 3097 - sizeof tss_seg.prev_task_link); 3097 + sizeof(tss_seg.prev_task_link)); 3098 3098 if (ret != X86EMUL_CONTINUE) 3099 3099 return ret; 3100 3100 } ··· 3216 3216 u32 eip_offset = offsetof(struct tss_segment_32, eip); 3217 3217 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 3218 3218 3219 - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3219 + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3220 3220 if (ret != X86EMUL_CONTINUE) 3221 3221 return ret; 3222 3222 ··· 3228 3228 if (ret != X86EMUL_CONTINUE) 3229 3229 return ret; 3230 3230 3231 - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); 3231 + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3232 3232 if (ret != X86EMUL_CONTINUE) 3233 3233 return ret; 3234 3234 ··· 3237 3237 3238 3238 ret = linear_write_system(ctxt, new_tss_base, 3239 3239 &tss_seg.prev_task_link, 3240 - sizeof tss_seg.prev_task_link); 3240 + sizeof(tss_seg.prev_task_link)); 3241 3241 if (ret != 
X86EMUL_CONTINUE) 3242 3242 return ret; 3243 3243 }
+1 -1
arch/x86/kvm/lapic.c
··· 2409 2409 r = kvm_apic_state_fixup(vcpu, s, true); 2410 2410 if (r) 2411 2411 return r; 2412 - memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); 2412 + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); 2413 2413 2414 2414 recalculate_apic_map(vcpu->kvm); 2415 2415 kvm_apic_set_version(vcpu);
+21 -21
arch/x86/kvm/x86.c
··· 2924 2924 unsigned size; 2925 2925 2926 2926 r = -EFAULT; 2927 - if (copy_from_user(&msrs, user_msrs, sizeof msrs)) 2927 + if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 2928 2928 goto out; 2929 2929 2930 2930 r = -E2BIG; ··· 3091 3091 unsigned n; 3092 3092 3093 3093 r = -EFAULT; 3094 - if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) 3094 + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 3095 3095 goto out; 3096 3096 n = msr_list.nmsrs; 3097 3097 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 3098 - if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) 3098 + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 3099 3099 goto out; 3100 3100 r = -E2BIG; 3101 3101 if (n < msr_list.nmsrs) ··· 3117 3117 struct kvm_cpuid2 cpuid; 3118 3118 3119 3119 r = -EFAULT; 3120 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3120 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3121 3121 goto out; 3122 3122 3123 3123 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ··· 3126 3126 goto out; 3127 3127 3128 3128 r = -EFAULT; 3129 - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3129 + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 3130 3130 goto out; 3131 3131 r = 0; 3132 3132 break; ··· 3894 3894 struct kvm_interrupt irq; 3895 3895 3896 3896 r = -EFAULT; 3897 - if (copy_from_user(&irq, argp, sizeof irq)) 3897 + if (copy_from_user(&irq, argp, sizeof(irq))) 3898 3898 goto out; 3899 3899 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 3900 3900 break; ··· 3912 3912 struct kvm_cpuid cpuid; 3913 3913 3914 3914 r = -EFAULT; 3915 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3915 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3916 3916 goto out; 3917 3917 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 3918 3918 break; ··· 3922 3922 struct kvm_cpuid2 cpuid; 3923 3923 3924 3924 r = -EFAULT; 3925 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3925 + if 
(copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3926 3926 goto out; 3927 3927 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 3928 3928 cpuid_arg->entries); ··· 3933 3933 struct kvm_cpuid2 cpuid; 3934 3934 3935 3935 r = -EFAULT; 3936 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3936 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3937 3937 goto out; 3938 3938 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 3939 3939 cpuid_arg->entries); 3940 3940 if (r) 3941 3941 goto out; 3942 3942 r = -EFAULT; 3943 - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3943 + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 3944 3944 goto out; 3945 3945 r = 0; 3946 3946 break; ··· 3961 3961 struct kvm_tpr_access_ctl tac; 3962 3962 3963 3963 r = -EFAULT; 3964 - if (copy_from_user(&tac, argp, sizeof tac)) 3964 + if (copy_from_user(&tac, argp, sizeof(tac))) 3965 3965 goto out; 3966 3966 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 3967 3967 if (r) 3968 3968 goto out; 3969 3969 r = -EFAULT; 3970 - if (copy_to_user(argp, &tac, sizeof tac)) 3970 + if (copy_to_user(argp, &tac, sizeof(tac))) 3971 3971 goto out; 3972 3972 r = 0; 3973 3973 break; ··· 3980 3980 if (!lapic_in_kernel(vcpu)) 3981 3981 goto out; 3982 3982 r = -EFAULT; 3983 - if (copy_from_user(&va, argp, sizeof va)) 3983 + if (copy_from_user(&va, argp, sizeof(va))) 3984 3984 goto out; 3985 3985 idx = srcu_read_lock(&vcpu->kvm->srcu); 3986 3986 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); ··· 3991 3991 u64 mcg_cap; 3992 3992 3993 3993 r = -EFAULT; 3994 - if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) 3994 + if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 3995 3995 goto out; 3996 3996 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 3997 3997 break; ··· 4000 4000 struct kvm_x86_mce mce; 4001 4001 4002 4002 r = -EFAULT; 4003 - if (copy_from_user(&mce, argp, sizeof mce)) 4003 + if (copy_from_user(&mce, argp, sizeof(mce))) 4004 4004 goto out; 4005 4005 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 
4006 4006 break; ··· 4536 4536 if (kvm->created_vcpus) 4537 4537 goto set_identity_unlock; 4538 4538 r = -EFAULT; 4539 - if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) 4539 + if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 4540 4540 goto set_identity_unlock; 4541 4541 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 4542 4542 set_identity_unlock: ··· 4620 4620 if (r) 4621 4621 goto get_irqchip_out; 4622 4622 r = -EFAULT; 4623 - if (copy_to_user(argp, chip, sizeof *chip)) 4623 + if (copy_to_user(argp, chip, sizeof(*chip))) 4624 4624 goto get_irqchip_out; 4625 4625 r = 0; 4626 4626 get_irqchip_out: ··· 4666 4666 } 4667 4667 case KVM_SET_PIT: { 4668 4668 r = -EFAULT; 4669 - if (copy_from_user(&u.ps, argp, sizeof u.ps)) 4669 + if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 4670 4670 goto out; 4671 4671 r = -ENXIO; 4672 4672 if (!kvm->arch.vpit) ··· 8205 8205 sregs->efer = vcpu->arch.efer; 8206 8206 sregs->apic_base = kvm_get_apic_base(vcpu); 8207 8207 8208 - memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 8208 + memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); 8209 8209 8210 8210 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 8211 8211 set_bit(vcpu->arch.interrupt.nr, ··· 8509 8509 fpu->last_opcode = fxsave->fop; 8510 8510 fpu->last_ip = fxsave->rip; 8511 8511 fpu->last_dp = fxsave->rdp; 8512 - memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); 8512 + memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 8513 8513 8514 8514 vcpu_put(vcpu); 8515 8515 return 0; ··· 8530 8530 fxsave->fop = fpu->last_opcode; 8531 8531 fxsave->rip = fpu->last_ip; 8532 8532 fxsave->rdp = fpu->last_dp; 8533 - memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); 8533 + memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 8534 8534 8535 8535 vcpu_put(vcpu); 8536 8536 return 0;
+2 -2
arch/x86/mm/hugetlbpage.c
··· 92 92 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area 93 93 * in the full address space. 94 94 */ 95 - info.high_limit = in_compat_syscall() ? 95 + info.high_limit = in_32bit_syscall() ? 96 96 task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW); 97 97 98 98 info.align_mask = PAGE_MASK & ~huge_page_mask(h); ··· 116 116 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area 117 117 * in the full address space. 118 118 */ 119 - if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall()) 119 + if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) 120 120 info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; 121 121 122 122 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+1 -1
arch/x86/mm/mmap.c
··· 166 166 struct mm_struct *mm = current->mm; 167 167 168 168 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES 169 - if (in_compat_syscall()) { 169 + if (in_32bit_syscall()) { 170 170 return is_legacy ? mm->mmap_compat_legacy_base 171 171 : mm->mmap_compat_base; 172 172 }
+10 -2
arch/x86/mm/numa_emulation.c
··· 399 399 n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); 400 400 ret = -1; 401 401 for_each_node_mask(i, physnode_mask) { 402 + /* 403 + * The reason we pass in blk[0] is due to 404 + * numa_remove_memblk_from() called by 405 + * emu_setup_memblk() will delete entry 0 406 + * and then move everything else up in the pi.blk 407 + * array. Therefore we should always be looking 408 + * at blk[0]. 409 + */ 402 410 ret = split_nodes_size_interleave_uniform(&ei, &pi, 403 - pi.blk[i].start, pi.blk[i].end, 0, 404 - n, &pi.blk[i], nid); 411 + pi.blk[0].start, pi.blk[0].end, 0, 412 + n, &pi.blk[0], nid); 405 413 if (ret < 0) 406 414 break; 407 415 if (ret < n) {
+5 -1
arch/x86/mm/pageattr.c
··· 2309 2309 2310 2310 /* 2311 2311 * We should perform an IPI and flush all tlbs, 2312 - * but that can deadlock->flush only current cpu: 2312 + * but that can deadlock->flush only current cpu. 2313 + * Preemption needs to be disabled around __flush_tlb_all() due to 2314 + * CR3 reload in __native_flush_tlb(). 2313 2315 */ 2316 + preempt_disable(); 2314 2317 __flush_tlb_all(); 2318 + preempt_enable(); 2315 2319 2316 2320 arch_flush_lazy_mmu_mode(); 2317 2321 }
+2 -2
arch/x86/tools/relocs.c
··· 130 130 REG_EXTENDED|REG_NOSUB); 131 131 132 132 if (err) { 133 - regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf); 133 + regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf)); 134 134 die("%s", errbuf); 135 135 } 136 136 } ··· 405 405 } 406 406 for (i = 0; i < ehdr.e_shnum; i++) { 407 407 struct section *sec = &secs[i]; 408 - if (fread(&shdr, sizeof shdr, 1, fp) != 1) 408 + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) 409 409 die("Cannot read ELF section headers %d/%d: %s\n", 410 410 i, ehdr.e_shnum, strerror(errno)); 411 411 sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
+1 -1
arch/x86/um/asm/elf.h
··· 194 194 195 195 typedef unsigned long elf_greg_t; 196 196 197 - #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) 197 + #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) 198 198 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 199 199 200 200 typedef struct user_i387_struct elf_fpregset_t;
+4 -12
drivers/firmware/efi/efivars.c
··· 229 229 return 0; 230 230 } 231 231 232 - static inline bool is_compat(void) 233 - { 234 - if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) 235 - return true; 236 - 237 - return false; 238 - } 239 - 240 232 static void 241 233 copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) 242 234 { ··· 255 263 u8 *data; 256 264 int err; 257 265 258 - if (is_compat()) { 266 + if (in_compat_syscall()) { 259 267 struct compat_efi_variable *compat; 260 268 261 269 if (count != sizeof(*compat)) ··· 316 324 &entry->var.DataSize, entry->var.Data)) 317 325 return -EIO; 318 326 319 - if (is_compat()) { 327 + if (in_compat_syscall()) { 320 328 compat = (struct compat_efi_variable *)buf; 321 329 322 330 size = sizeof(*compat); ··· 410 418 struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; 411 419 struct efi_variable *new_var = (struct efi_variable *)buf; 412 420 struct efivar_entry *new_entry; 413 - bool need_compat = is_compat(); 421 + bool need_compat = in_compat_syscall(); 414 422 efi_char16_t *name; 415 423 unsigned long size; 416 424 u32 attributes; ··· 487 495 if (!capable(CAP_SYS_ADMIN)) 488 496 return -EACCES; 489 497 490 - if (is_compat()) { 498 + if (in_compat_syscall()) { 491 499 if (count != sizeof(*compat)) 492 500 return -EINVAL; 493 501
+2 -2
include/linux/compat.h
··· 1032 1032 #else /* !CONFIG_COMPAT */ 1033 1033 1034 1034 #define is_compat_task() (0) 1035 - #ifndef in_compat_syscall 1035 + /* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */ 1036 + #define in_compat_syscall in_compat_syscall 1036 1037 static inline bool in_compat_syscall(void) { return false; } 1037 - #endif 1038 1038 1039 1039 #endif /* CONFIG_COMPAT */ 1040 1040
+1 -1
kernel/time/time.c
··· 842 842 ts->tv_sec = kts.tv_sec; 843 843 844 844 /* Zero out the padding for 32 bit systems or in compat mode */ 845 - if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())) 845 + if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall()) 846 846 kts.tv_nsec &= 0xFFFFFFFFUL; 847 847 848 848 ts->tv_nsec = kts.tv_nsec;
-2
net/xfrm/xfrm_state.c
··· 2077 2077 struct xfrm_mgr *km; 2078 2078 struct xfrm_policy *pol = NULL; 2079 2079 2080 - #ifdef CONFIG_COMPAT 2081 2080 if (in_compat_syscall()) 2082 2081 return -EOPNOTSUPP; 2083 - #endif 2084 2082 2085 2083 if (!optval && !optlen) { 2086 2084 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
-2
net/xfrm/xfrm_user.c
··· 2621 2621 const struct xfrm_link *link; 2622 2622 int type, err; 2623 2623 2624 - #ifdef CONFIG_COMPAT 2625 2624 if (in_compat_syscall()) 2626 2625 return -EOPNOTSUPP; 2627 - #endif 2628 2626 2629 2627 type = nlh->nlmsg_type; 2630 2628 if (type > XFRM_MSG_MAX)
+32 -6
tools/objtool/check.c
··· 836 836 struct symbol *pfunc = insn->func->pfunc; 837 837 unsigned int prev_offset = 0; 838 838 839 - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { 839 + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) { 840 840 if (rela == next_table) 841 841 break; 842 842 ··· 926 926 { 927 927 struct rela *text_rela, *rodata_rela; 928 928 struct instruction *orig_insn = insn; 929 + struct section *rodata_sec; 929 930 unsigned long table_offset; 930 931 931 932 /* ··· 954 953 /* look for a relocation which references .rodata */ 955 954 text_rela = find_rela_by_dest_range(insn->sec, insn->offset, 956 955 insn->len); 957 - if (!text_rela || text_rela->sym != file->rodata->sym) 956 + if (!text_rela || text_rela->sym->type != STT_SECTION || 957 + !text_rela->sym->sec->rodata) 958 958 continue; 959 959 960 960 table_offset = text_rela->addend; 961 + rodata_sec = text_rela->sym->sec; 962 + 961 963 if (text_rela->type == R_X86_64_PC32) 962 964 table_offset += 4; 963 965 ··· 968 964 * Make sure the .rodata address isn't associated with a 969 965 * symbol. gcc jump tables are anonymous data. 
970 966 */ 971 - if (find_symbol_containing(file->rodata, table_offset)) 967 + if (find_symbol_containing(rodata_sec, table_offset)) 972 968 continue; 973 969 974 - rodata_rela = find_rela_by_dest(file->rodata, table_offset); 970 + rodata_rela = find_rela_by_dest(rodata_sec, table_offset); 975 971 if (rodata_rela) { 976 972 /* 977 973 * Use of RIP-relative switch jumps is quite rare, and ··· 1056 1052 struct symbol *func; 1057 1053 int ret; 1058 1054 1059 - if (!file->rodata || !file->rodata->rela) 1055 + if (!file->rodata) 1060 1056 return 0; 1061 1057 1062 1058 for_each_sec(file, sec) { ··· 1202 1198 return 0; 1203 1199 } 1204 1200 1201 + static void mark_rodata(struct objtool_file *file) 1202 + { 1203 + struct section *sec; 1204 + bool found = false; 1205 + 1206 + /* 1207 + * This searches for the .rodata section or multiple .rodata.func_name 1208 + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8 1209 + * rodata sections are ignored as they don't contain jump tables. 1210 + */ 1211 + for_each_sec(file, sec) { 1212 + if (!strncmp(sec->name, ".rodata", 7) && 1213 + !strstr(sec->name, ".str1.")) { 1214 + sec->rodata = true; 1215 + found = true; 1216 + } 1217 + } 1218 + 1219 + file->rodata = found; 1220 + } 1221 + 1205 1222 static int decode_sections(struct objtool_file *file) 1206 1223 { 1207 1224 int ret; 1225 + 1226 + mark_rodata(file); 1208 1227 1209 1228 ret = decode_instructions(file); 1210 1229 if (ret) ··· 2198 2171 INIT_LIST_HEAD(&file.insn_list); 2199 2172 hash_init(file.insn_hash); 2200 2173 file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); 2201 - file.rodata = find_section_by_name(file.elf, ".rodata"); 2202 2174 file.c_file = find_section_by_name(file.elf, ".comment"); 2203 2175 file.ignore_unreachables = no_unreachable; 2204 2176 file.hints = false;
+2 -2
tools/objtool/check.h
··· 60 60 struct elf *elf; 61 61 struct list_head insn_list; 62 62 DECLARE_HASHTABLE(insn_hash, 16); 63 - struct section *rodata, *whitelist; 64 - bool ignore_unreachables, c_file, hints; 63 + struct section *whitelist; 64 + bool ignore_unreachables, c_file, hints, rodata; 65 65 }; 66 66 67 67 int check(const char *objname, bool orc);
+2 -1
tools/objtool/elf.c
··· 301 301 if (sym->type != STT_FUNC) 302 302 continue; 303 303 sym->pfunc = sym->cfunc = sym; 304 - coldstr = strstr(sym->name, ".cold."); 304 + coldstr = strstr(sym->name, ".cold"); 305 305 if (!coldstr) 306 306 continue; 307 307 ··· 379 379 rela->offset = rela->rela.r_offset; 380 380 symndx = GELF_R_SYM(rela->rela.r_info); 381 381 rela->sym = find_symbol_by_index(elf, symndx); 382 + rela->rela_sec = sec; 382 383 if (!rela->sym) { 383 384 WARN("can't find rela entry symbol %d for %s", 384 385 symndx, sec->name);
+2 -1
tools/objtool/elf.h
··· 48 48 char *name; 49 49 int idx; 50 50 unsigned int len; 51 - bool changed, text; 51 + bool changed, text, rodata; 52 52 }; 53 53 54 54 struct symbol { ··· 68 68 struct list_head list; 69 69 struct hlist_node hash; 70 70 GElf_Rela rela; 71 + struct section *rela_sec; 71 72 struct symbol *sym; 72 73 unsigned int type; 73 74 unsigned long offset;