Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A set of x86 fixes:

- Prevent potential NULL pointer dereferences in the HPET and HyperV
code

- Exclude the GART aperture from /proc/kcore to prevent kernel
crashes on access

- Use the correct macros for Cyrix I/O on Geode processors

- Remove yet another kernel address printk leak

- Announce microcode reload completion as requested by quite some
people. Microcode loading has become popular recently.

- Some 'Make Clang' happy fixlets

- A few cleanups for recently added code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/gart: Exclude GART aperture from kcore
x86/hw_breakpoints: Make default case in hw_breakpoint_arch_parse() return an error
x86/mm/pti: Make local symbols static
x86/cpu/cyrix: Remove {get,set}Cx86_old macros used for Cyrix processors
x86/cpu/cyrix: Use correct macros for Cyrix calls on Geode processors
x86/microcode: Announce reload operation's completion
x86/hyperv: Prevent potential NULL pointer dereference
x86/hpet: Prevent potential NULL pointer dereference
x86/lib: Fix indentation issue, remove extra tab
x86/boot: Restrict header scope to make Clang happy
x86/mm: Don't leak kernel addresses
x86/cpufeature: Fix various quality problems in the <asm/cpu_device_id.h> header

+81 -58
+2 -1
arch/x86/boot/string.c
··· 13 13 */ 14 14 15 15 #include <linux/types.h> 16 - #include <linux/kernel.h> 16 + #include <linux/compiler.h> 17 17 #include <linux/errno.h> 18 + #include <linux/limits.h> 18 19 #include <asm/asm.h> 19 20 #include "ctype.h" 20 21 #include "string.h"
+5 -1
arch/x86/hyperv/hv_init.c
··· 103 103 u64 msr_vp_index; 104 104 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; 105 105 void **input_arg; 106 + struct page *pg; 106 107 107 108 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); 108 - *input_arg = page_address(alloc_page(GFP_KERNEL)); 109 + pg = alloc_page(GFP_KERNEL); 110 + if (unlikely(!pg)) 111 + return -ENOMEM; 112 + *input_arg = page_address(pg); 109 113 110 114 hv_get_vp_index(msr_vp_index); 111 115
+15 -16
arch/x86/include/asm/cpu_device_id.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _CPU_DEVICE_ID 3 - #define _CPU_DEVICE_ID 1 2 + #ifndef _ASM_X86_CPU_DEVICE_ID 3 + #define _ASM_X86_CPU_DEVICE_ID 4 4 5 5 /* 6 6 * Declare drivers belonging to specific x86 CPUs ··· 8 8 */ 9 9 10 10 #include <linux/mod_devicetable.h> 11 - 12 - extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); 13 11 14 12 /* 15 13 * Match specific microcode revisions. ··· 20 22 */ 21 23 22 24 struct x86_cpu_desc { 23 - __u8 x86_family; 24 - __u8 x86_vendor; 25 - __u8 x86_model; 26 - __u8 x86_stepping; 27 - __u32 x86_microcode_rev; 25 + u8 x86_family; 26 + u8 x86_vendor; 27 + u8 x86_model; 28 + u8 x86_stepping; 29 + u32 x86_microcode_rev; 28 30 }; 29 31 30 - #define INTEL_CPU_DESC(mod, step, rev) { \ 31 - .x86_family = 6, \ 32 - .x86_vendor = X86_VENDOR_INTEL, \ 33 - .x86_model = mod, \ 34 - .x86_stepping = step, \ 35 - .x86_microcode_rev = rev, \ 32 + #define INTEL_CPU_DESC(model, stepping, revision) { \ 33 + .x86_family = 6, \ 34 + .x86_vendor = X86_VENDOR_INTEL, \ 35 + .x86_model = (model), \ 36 + .x86_stepping = (stepping), \ 37 + .x86_microcode_rev = (revision), \ 36 38 } 37 39 40 + extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); 38 41 extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); 39 42 40 - #endif 43 + #endif /* _ASM_X86_CPU_DEVICE_ID */
-21
arch/x86/include/asm/processor-cyrix.h
··· 3 3 * NSC/Cyrix CPU indexed register access. Must be inlined instead of 4 4 * macros to ensure correct access ordering 5 5 * Access order is always 0x22 (=offset), 0x23 (=value) 6 - * 7 - * When using the old macros a line like 8 - * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); 9 - * gets expanded to: 10 - * do { 11 - * outb((CX86_CCR2), 0x22); 12 - * outb((({ 13 - * outb((CX86_CCR2), 0x22); 14 - * inb(0x23); 15 - * }) | 0x88), 0x23); 16 - * } while (0); 17 - * 18 - * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23). 19 6 */ 20 7 21 8 static inline u8 getCx86(u8 reg) ··· 16 29 outb(reg, 0x22); 17 30 outb(data, 0x23); 18 31 } 19 - 20 - #define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); }) 21 - 22 - #define setCx86_old(reg, data) do { \ 23 - outb((reg), 0x22); \ 24 - outb((data), 0x23); \ 25 - } while (0) 26 -
+13 -7
arch/x86/kernel/aperture_64.c
··· 14 14 #define pr_fmt(fmt) "AGP: " fmt 15 15 16 16 #include <linux/kernel.h> 17 + #include <linux/kcore.h> 17 18 #include <linux/types.h> 18 19 #include <linux/init.h> 19 20 #include <linux/memblock.h> ··· 58 57 59 58 int fix_aperture __initdata = 1; 60 59 61 - #ifdef CONFIG_PROC_VMCORE 60 + #if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE) 62 61 /* 63 62 * If the first kernel maps the aperture over e820 RAM, the kdump kernel will 64 63 * use the same range because it will remain configured in the northbridge. ··· 67 66 */ 68 67 static unsigned long aperture_pfn_start, aperture_page_count; 69 68 70 - static int gart_oldmem_pfn_is_ram(unsigned long pfn) 69 + static int gart_mem_pfn_is_ram(unsigned long pfn) 71 70 { 72 71 return likely((pfn < aperture_pfn_start) || 73 72 (pfn >= aperture_pfn_start + aperture_page_count)); 74 73 } 75 74 76 - static void exclude_from_vmcore(u64 aper_base, u32 aper_order) 75 + static void __init exclude_from_core(u64 aper_base, u32 aper_order) 77 76 { 78 77 aperture_pfn_start = aper_base >> PAGE_SHIFT; 79 78 aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; 80 - WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); 79 + #ifdef CONFIG_PROC_VMCORE 80 + WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram)); 81 + #endif 82 + #ifdef CONFIG_PROC_KCORE 83 + WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram)); 84 + #endif 81 85 } 82 86 #else 83 87 static void exclude_from_core(u64 aper_base, u32 aper_order) 84 88 { 85 89 } 86 90 #endif ··· 480 474 * may have allocated the range over its e820 RAM 481 475 * and fixed up the northbridge 482 476 */ 483 - exclude_from_vmcore(last_aper_base, last_aper_order); 477 + exclude_from_core(last_aper_base, last_aper_order); 484 478 485 479 return 1; 486 480 } ··· 526 520 * overlap with the first kernel's memory. We can't access the 527 521 * range through vmcore even though it should be part of the dump. 528 522 */ 529 - exclude_from_vmcore(aper_alloc, aper_order); 523 + exclude_from_core(aper_alloc, aper_order); 530 524 531 525 /* Fix up the north bridges */ 532 526 for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
+7 -7
arch/x86/kernel/cpu/cyrix.c
··· 124 124 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 125 125 126 126 /* Load/Store Serialize to mem access disable (=reorder it) */ 127 - setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); 127 + setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); 128 128 /* set load/store serialize from 1GB to 4GB */ 129 129 ccr3 |= 0xe0; 130 130 setCx86(CX86_CCR3, ccr3); ··· 135 135 pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); 136 136 137 137 /* CCR2 bit 2: unlock NW bit */ 138 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); 138 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); 139 139 /* set 'Not Write-through' */ 140 140 write_cr0(read_cr0() | X86_CR0_NW); 141 141 /* CCR2 bit 2: lock NW bit and set WT1 */ 142 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); 142 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); 143 143 } 144 144 145 145 /* ··· 153 153 local_irq_save(flags); 154 154 155 155 /* Suspend on halt power saving and enable #SUSP pin */ 156 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); 156 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); 157 157 158 158 ccr3 = getCx86(CX86_CCR3); 159 159 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 160 160 161 161 162 162 /* FPU fast, DTE cache, Mem bypass */ 163 - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); 163 + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); 164 164 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ 165 165 166 166 set_cx86_memwb(); ··· 296 296 /* GXm supports extended cpuid levels 'ala' AMD */ 297 297 if (c->cpuid_level == 2) { 298 298 /* Enable cxMMX extensions (GX1 Datasheet 54) */ 299 - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); 299 + setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); 300 300 301 301 /* 302 302 * GXm : 0x30 ... 0x5f GXm datasheet 51 ··· 319 319 if (dir1 > 7) { 320 320 dir0_msn++; /* M II */ 321 321 /* Enable MMX extensions (App note 108) */ 322 - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); 322 + setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); 323 323 } else { 324 324 /* A 6x86MX - it has the bug. */ 325 325 set_cpu_bug(c, X86_BUG_COMA);
+2
arch/x86/kernel/cpu/microcode/core.c
··· 608 608 if (ret > 0) 609 609 microcode_check(); 610 610 611 + pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode); 612 + 611 613 return ret; 612 614 } 613 615
+2
arch/x86/kernel/hpet.c
··· 905 905 return 0; 906 906 907 907 hpet_set_mapping(); 908 + if (!hpet_virt_address) 909 + return 0; 908 910 909 911 /* 910 912 * Read the period and check for a sane value:
+1
arch/x86/kernel/hw_breakpoint.c
··· 354 354 #endif 355 355 default: 356 356 WARN_ON_ONCE(1); 357 + return -EINVAL; 357 358 } 358 359 359 360 /*
+2 -2
arch/x86/kernel/mpparse.c
··· 598 598 mpf_base = base; 599 599 mpf_found = true; 600 600 601 - pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", 602 - base, base + sizeof(*mpf) - 1, mpf); 601 + pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n", 602 + base, base + sizeof(*mpf) - 1); 603 603 604 604 memblock_reserve(base, sizeof(*mpf)); 605 605 if (mpf->physptr)
+1 -1
arch/x86/lib/csum-partial_64.c
··· 94 94 : "m" (*(unsigned long *)buff), 95 95 "r" (zero), "0" (result)); 96 96 --count; 97 - buff += 8; 97 + buff += 8; 98 98 } 99 99 result = add32_with_carry(result>>32, 100 100 result&0xffffffff);
+2 -2
arch/x86/mm/pti.c
··· 77 77 pr_info("%s\n", reason); 78 78 } 79 79 80 - enum pti_mode { 80 + static enum pti_mode { 81 81 PTI_AUTO = 0, 82 82 PTI_FORCE_OFF, 83 83 PTI_FORCE_ON ··· 602 602 set_memory_global(start, (end_global - start) >> PAGE_SHIFT); 603 603 } 604 604 605 - void pti_set_kernel_image_nonglobal(void) 605 + static void pti_set_kernel_image_nonglobal(void) 606 606 { 607 607 /* 608 608 * The identity map is created with PMDs, regardless of the
+27
fs/proc/kcore.c
··· 54 54 static DECLARE_RWSEM(kclist_lock); 55 55 static int kcore_need_update = 1; 56 56 57 + /* 58 + * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error 59 + * Same as oldmem_pfn_is_ram in vmcore 60 + */ 61 + static int (*mem_pfn_is_ram)(unsigned long pfn); 62 + 63 + int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)) 64 + { 65 + if (mem_pfn_is_ram) 66 + return -EBUSY; 67 + mem_pfn_is_ram = fn; 68 + return 0; 69 + } 70 + 71 + static int pfn_is_ram(unsigned long pfn) 72 + { 73 + if (mem_pfn_is_ram) 74 + return mem_pfn_is_ram(pfn); 75 + else 76 + return 1; 77 + } 78 + 57 79 /* This doesn't grab kclist_lock, so it should only be used at init time. */ 58 80 void __init kclist_add(struct kcore_list *new, void *addr, size_t size, 59 81 int type) ··· 487 465 goto out; 488 466 } 489 467 m = NULL; /* skip the list anchor */ 468 + } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) { 469 + if (clear_user(buffer, tsz)) { 470 + ret = -EFAULT; 471 + goto out; 472 + } 490 473 } else if (m->type == KCORE_VMALLOC) { 491 474 vread(buf, (char *)start, tsz); 492 475 /* we have to zero-fill user buffer even if no read */
+2
include/linux/kcore.h
··· 44 44 m->vaddr = (unsigned long)vaddr; 45 45 kclist_add(m, addr, sz, KCORE_REMAP); 46 46 } 47 + 48 + extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); 47 49 #else 48 50 static inline 49 51 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)