Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"This update contains:

- the manual revert of the SYSCALL32 changes which caused a
regression

- a fix for the MPX vma handling

- three fixes for the ioremap 'is ram' checks

- PAT warning fixes

- a trivial fix for the size calculation of TLB tracepoints

- handle old EFI structures gracefully

This also contains a PAT fix from Jan plus a revert thereof. Toshi
explained why the code is correct"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm/pat: Revert 'Adjust default caching mode translation tables'
x86/asm/entry/32: Revert 'Do not use R9 in SYSCALL32' commit
x86/mm: Fix newly introduced printk format warnings
mm: Fix bugs in region_is_ram()
x86/mm: Remove region_is_ram() call from ioremap
x86/mm: Move warning from __ioremap_check_ram() to the call site
x86/mm/pat, drivers/media/ivtv: Move the PAT warning and replace WARN() with pr_warn()
x86/mm/pat, drivers/infiniband/ipath: Replace WARN() with pr_warn()
x86/mm/pat: Adjust default caching mode translation tables
x86/fpu: Disable dependent CPU features on "noxsave"
x86/mpx: Do not set ->vm_ops on MPX VMAs
x86/mm: Add parenthesis for TLB tracepoint size calculation
efi: Handle memory error structures produced based on old versions of standard

+81 -59
+9 -5
arch/x86/entry/entry_64_compat.S
··· 205 205 movl RDX(%rsp), %edx /* arg3 */ 206 206 movl RSI(%rsp), %ecx /* arg4 */ 207 207 movl RDI(%rsp), %r8d /* arg5 */ 208 - movl %ebp, %r9d /* arg6 */ 209 208 .endm 210 209 211 210 .macro auditsys_exit exit ··· 235 236 236 237 sysenter_auditsys: 237 238 auditsys_entry_common 239 + movl %ebp, %r9d /* reload 6th syscall arg */ 238 240 jmp sysenter_dispatch 239 241 240 242 sysexit_audit: ··· 336 336 * 32-bit zero extended: 337 337 */ 338 338 ASM_STAC 339 - 1: movl (%r8), %ebp 339 + 1: movl (%r8), %r9d 340 340 _ASM_EXTABLE(1b, ia32_badarg) 341 341 ASM_CLAC 342 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) ··· 346 346 cstar_do_call: 347 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 348 348 movl %edi, %r8d /* arg5 */ 349 - movl %ebp, %r9d /* arg6 */ 349 + /* r9 already loaded */ /* arg6 */ 350 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 351 351 movl %ebx, %edi /* arg1 */ 352 352 movl %edx, %edx /* arg3 (zero extension) */ ··· 358 358 call *ia32_sys_call_table(, %rax, 8) 359 359 movq %rax, RAX(%rsp) 360 360 1: 361 - movl RCX(%rsp), %ebp 362 361 DISABLE_INTERRUPTS(CLBR_NONE) 363 362 TRACE_IRQS_OFF 364 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) ··· 391 392 392 393 #ifdef CONFIG_AUDITSYSCALL 393 394 cstar_auditsys: 395 + movl %r9d, R9(%rsp) /* register to be clobbered by call */ 394 396 auditsys_entry_common 397 + movl R9(%rsp), %r9d /* reload 6th syscall arg */ 395 398 jmp cstar_dispatch 396 399 397 400 sysretl_audit: ··· 405 404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 406 405 jz cstar_auditsys 407 406 #endif 407 + xchgl %r9d, %ebp 408 408 SAVE_EXTRA_REGS 409 409 xorl %eax, %eax /* Do not leak kernel information */ 410 410 movq %rax, R11(%rsp) 411 411 movq %rax, R10(%rsp) 412 - movq %rax, R9(%rsp) 412 + movq %r9, R9(%rsp) 413 413 movq %rax, R8(%rsp) 414 414 movq %rsp, %rdi /* &pt_regs -> arg1 */ 415 415 call syscall_trace_enter 416 + 
movl R9(%rsp), %r9d 416 417 417 418 /* Reload arg registers from stack. (see sysenter_tracesys) */ 418 419 movl RCX(%rsp), %ecx ··· 424 421 movl %eax, %eax /* zero extension */ 425 422 426 423 RESTORE_EXTRA_REGS 424 + xchgl %ebp, %r9d 427 425 jmp cstar_do_call 428 426 END(entry_SYSCALL_compat) 429 427
+6
arch/x86/kernel/fpu/init.c
··· 351 351 352 352 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 353 353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 354 + setup_clear_cpu_cap(X86_FEATURE_XSAVEC); 354 355 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 355 356 setup_clear_cpu_cap(X86_FEATURE_AVX); 356 357 setup_clear_cpu_cap(X86_FEATURE_AVX2); 358 + setup_clear_cpu_cap(X86_FEATURE_AVX512F); 359 + setup_clear_cpu_cap(X86_FEATURE_AVX512PF); 360 + setup_clear_cpu_cap(X86_FEATURE_AVX512ER); 361 + setup_clear_cpu_cap(X86_FEATURE_AVX512CD); 362 + setup_clear_cpu_cap(X86_FEATURE_MPX); 357 363 358 364 return 1; 359 365 }
+6 -17
arch/x86/mm/ioremap.c
··· 63 63 !PageReserved(pfn_to_page(start_pfn + i))) 64 64 return 1; 65 65 66 - WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); 67 - 68 66 return 0; 69 67 } 70 68 ··· 92 94 pgprot_t prot; 93 95 int retval; 94 96 void __iomem *ret_addr; 95 - int ram_region; 96 97 97 98 /* Don't allow wraparound or zero size */ 98 99 last_addr = phys_addr + size - 1; ··· 114 117 /* 115 118 * Don't allow anybody to remap normal RAM that we're using.. 116 119 */ 117 - /* First check if whole region can be identified as RAM or not */ 118 - ram_region = region_is_ram(phys_addr, size); 119 - if (ram_region > 0) { 120 - WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 121 - (unsigned long int)phys_addr, 122 - (unsigned long int)last_addr); 120 + pfn = phys_addr >> PAGE_SHIFT; 121 + last_pfn = last_addr >> PAGE_SHIFT; 122 + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, 123 + __ioremap_check_ram) == 1) { 124 + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", 125 + &phys_addr, &last_addr); 123 126 return NULL; 124 127 } 125 128 126 - /* If could not be identified(-1), check page by page */ 127 - if (ram_region < 0) { 128 - pfn = phys_addr >> PAGE_SHIFT; 129 - last_pfn = last_addr >> PAGE_SHIFT; 130 - if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, 131 - __ioremap_check_ram) == 1) 132 - return NULL; 133 - } 134 129 /* 135 130 * Mappings have to be page-aligned 136 131 */
+7
arch/x86/mm/mmap.c
··· 126 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 127 127 } 128 128 } 129 + 130 + const char *arch_vma_name(struct vm_area_struct *vma) 131 + { 132 + if (vma->vm_flags & VM_MPX) 133 + return "[mpx]"; 134 + return NULL; 135 + }
+3 -21
arch/x86/mm/mpx.c
··· 20 20 #define CREATE_TRACE_POINTS 21 21 #include <asm/trace/mpx.h> 22 22 23 - static const char *mpx_mapping_name(struct vm_area_struct *vma) 24 - { 25 - return "[mpx]"; 26 - } 27 - 28 - static struct vm_operations_struct mpx_vma_ops = { 29 - .name = mpx_mapping_name, 30 - }; 31 - 32 - static int is_mpx_vma(struct vm_area_struct *vma) 33 - { 34 - return (vma->vm_ops == &mpx_vma_ops); 35 - } 36 - 37 23 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 38 24 { 39 25 if (is_64bit_mm(mm)) ··· 39 53 /* 40 54 * This is really a simplified "vm_mmap". it only handles MPX 41 55 * bounds tables (the bounds directory is user-allocated). 42 - * 43 - * Later on, we use the vma->vm_ops to uniquely identify these 44 - * VMAs. 45 56 */ 46 57 static unsigned long mpx_mmap(unsigned long len) 47 58 { ··· 84 101 ret = -ENOMEM; 85 102 goto out; 86 103 } 87 - vma->vm_ops = &mpx_vma_ops; 88 104 89 105 if (vm_flags & VM_LOCKED) { 90 106 up_write(&mm->mmap_sem); ··· 794 812 * so stop immediately and return an error. This 795 813 * probably results in a SIGSEGV. 796 814 */ 797 - if (!is_mpx_vma(vma)) 815 + if (!(vma->vm_flags & VM_MPX)) 798 816 return -EINVAL; 799 817 800 818 len = min(vma->vm_end, end) - addr; ··· 927 945 * lots of tables even though we have no actual table 928 946 * entries in use. 929 947 */ 930 - while (next && is_mpx_vma(next)) 948 + while (next && (next->vm_flags & VM_MPX)) 931 949 next = next->vm_next; 932 - while (prev && is_mpx_vma(prev)) 950 + while (prev && (prev->vm_flags & VM_MPX)) 933 951 prev = prev->vm_prev; 934 952 /* 935 953 * We know 'start' and 'end' lie within an area controlled
+1 -1
arch/x86/mm/tlb.c
··· 117 117 } else { 118 118 unsigned long addr; 119 119 unsigned long nr_pages = 120 - f->flush_end - f->flush_start / PAGE_SIZE; 120 + (f->flush_end - f->flush_start) / PAGE_SIZE; 121 121 addr = f->flush_start; 122 122 while (addr < f->flush_end) { 123 123 __flush_tlb_single(addr);
+12 -3
drivers/firmware/efi/cper.c
··· 305 305 return ret; 306 306 } 307 307 308 - static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308 + static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, 309 + int len) 309 310 { 310 311 struct cper_mem_err_compact cmem; 311 312 313 + /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ 314 + if (len == sizeof(struct cper_sec_mem_err_old) && 315 + (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { 316 + pr_err(FW_WARN "valid bits set for fields beyond structure\n"); 317 + return; 318 + } 312 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 313 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 314 321 if (mem->validation_bits & CPER_MEM_VALID_PA) ··· 412 405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 413 406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 414 407 printk("%s""section_type: memory error\n", newpfx); 415 - if (gdata->error_data_length >= sizeof(*mem_err)) 416 - cper_print_mem(newpfx, mem_err); 408 + if (gdata->error_data_length >= 409 + sizeof(struct cper_sec_mem_err_old)) 410 + cper_print_mem(newpfx, mem_err, 411 + gdata->error_data_length); 417 412 else 418 413 goto err_section_too_small; 419 414 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+4 -2
drivers/infiniband/hw/ipath/ipath_driver.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 35 + 34 36 #include <linux/sched.h> 35 37 #include <linux/spinlock.h> 36 38 #include <linux/idr.h> ··· 401 399 u32 bar0 = 0, bar1 = 0; 402 400 403 401 #ifdef CONFIG_X86_64 404 - if (WARN(pat_enabled(), 405 - "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 402 + if (pat_enabled()) { 403 + pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); 406 404 ret = -ENODEV; 407 405 goto bail; 408 406 }
+9 -6
drivers/media/pci/ivtv/ivtvfb.c
··· 38 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 39 39 */ 40 40 41 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 42 + 41 43 #include <linux/module.h> 42 44 #include <linux/kernel.h> 43 45 #include <linux/fb.h> ··· 1173 1171 { 1174 1172 int rc; 1175 1173 1174 + #ifdef CONFIG_X86_64 1175 + if (pat_enabled()) { 1176 + pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); 1177 + return -ENODEV; 1178 + } 1179 + #endif 1180 + 1176 1181 if (itv->osd_info) { 1177 1182 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1178 1183 return -EBUSY; ··· 1274 1265 int registered = 0; 1275 1266 int err; 1276 1267 1277 - #ifdef CONFIG_X86_64 1278 - if (WARN(pat_enabled(), 1279 - "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { 1280 - return -ENODEV; 1281 - } 1282 - #endif 1283 1268 1284 1269 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1285 1270 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
+21 -1
include/linux/cper.h
··· 340 340 __u64 mm_reg_addr; 341 341 }; 342 342 343 - /* Memory Error Section */ 343 + /* Old Memory Error Section UEFI 2.1, 2.2 */ 344 + struct cper_sec_mem_err_old { 345 + __u64 validation_bits; 346 + __u64 error_status; 347 + __u64 physical_addr; 348 + __u64 physical_addr_mask; 349 + __u16 node; 350 + __u16 card; 351 + __u16 module; 352 + __u16 bank; 353 + __u16 device; 354 + __u16 row; 355 + __u16 column; 356 + __u16 bit_pos; 357 + __u64 requestor_id; 358 + __u64 responder_id; 359 + __u64 target_id; 360 + __u8 error_type; 361 + }; 362 + 363 + /* Memory Error Section UEFI >= 2.3 */ 344 364 struct cper_sec_mem_err { 345 365 __u64 validation_bits; 346 366 __u64 error_status;
+3 -3
kernel/resource.c
··· 504 504 { 505 505 struct resource *p; 506 506 resource_size_t end = start + size - 1; 507 - int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; 508 508 const char *name = "System RAM"; 509 509 int ret = -1; 510 510 511 511 read_lock(&resource_lock); 512 512 for (p = iomem_resource.child; p ; p = p->sibling) { 513 - if (end < p->start) 513 + if (p->end < start) 514 514 continue; 515 515 516 516 if (p->start <= start && end <= p->end) { ··· 521 521 ret = 1; 522 522 break; 523 523 } 524 - if (p->end < start) 524 + if (end < p->start) 525 525 break; /* not found */ 526 526 } 527 527 read_unlock(&resource_lock);