Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"The accumulated fixes from this and last week:

- Fix vmalloc TLB flush and map range calculations which lead to
stale TLBs, spurious faults and other hard to diagnose issues.

- Use fault_in_pages_writeable() for prefaulting the user stack in the
FPU code as it's less fragile than the current solution

- Use the PF_KTHREAD flag when checking for a kernel thread instead
of current->mm as the latter can give the wrong answer due to
use_mm()

- Compute the vmemmap size correctly for KASLR and 5-Level paging.
Otherwise this can end up with a way too small vmemmap area.

- Make KASAN and 5-level paging work again by making sure that all
invalid bits are masked out when computing the P4D offset. This
worked before but got broken recently when the LDT remap area was
moved.

- Prevent a NULL pointer dereference in the resource control code
which can be triggered with certain mount options when the
requested resource is not available.

- Enforce ordering of microcode loading vs. perf initialization on
secondary CPUs. Otherwise perf tries to access a non-existing MSR
as the boot CPU marked it as available.

- Don't stop the resource control group walk early otherwise the
control bitmaps are not updated correctly and become inconsistent.

- Unbreak kgdb by returning 0 on success from
kgdb_arch_set_breakpoint() instead of an error code.

- Add more Icelake CPU model defines so depending changes can be
queued in other trees"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback
x86/kasan: Fix boot with 5-level paging and KASAN
x86/fpu: Don't use current->mm to check for a kthread
x86/kgdb: Return 0 from kgdb_arch_set_breakpoint()
x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled
x86/resctrl: Don't stop walking closids when a locksetup group is found
x86/fpu: Update kernel's FPU state before using for the fsave header
x86/mm/KASLR: Compute the size of the vmemmap section properly
x86/fpu: Use fault_in_pages_writeable() for pre-faulting
x86/CPU: Add more Icelake model numbers
mm/vmalloc: Avoid rare case of flushing TLB with weird arguments
mm/vmalloc: Fix calculation of direct map addr range

+45 -24
+3 -3
arch/x86/include/asm/fpu/internal.h
··· 536 536 struct fpu *fpu = &current->thread.fpu; 537 537 int cpu = smp_processor_id(); 538 538 539 - if (WARN_ON_ONCE(current->mm == NULL)) 539 + if (WARN_ON_ONCE(current->flags & PF_KTHREAD)) 540 540 return; 541 541 542 542 if (!fpregs_state_valid(fpu, cpu)) { ··· 567 567 * otherwise. 568 568 * 569 569 * The FPU context is only stored/restored for a user task and 570 - * ->mm is used to distinguish between kernel and user threads. 570 + * PF_KTHREAD is used to distinguish between kernel and user threads. 571 571 */ 572 572 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) 573 573 { 574 - if (static_cpu_has(X86_FEATURE_FPU) && current->mm) { 574 + if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) { 575 575 if (!copy_fpregs_to_fpstate(old_fpu)) 576 576 old_fpu->last_cpu = -1; 577 577 else
+3
arch/x86/include/asm/intel-family.h
··· 52 52 53 53 #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 54 54 55 + #define INTEL_FAM6_ICELAKE_X 0x6A 56 + #define INTEL_FAM6_ICELAKE_XEON_D 0x6C 57 + #define INTEL_FAM6_ICELAKE_DESKTOP 0x7D 55 58 #define INTEL_FAM6_ICELAKE_MOBILE 0x7E 56 59 57 60 /* "Small Core" Processors (Atom) */
+1 -1
arch/x86/kernel/cpu/microcode/core.c
··· 872 872 goto out_ucode_group; 873 873 874 874 register_syscore_ops(&mc_syscore_ops); 875 - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", 875 + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", 876 876 mc_cpu_online, mc_cpu_down_prep); 877 877 878 878 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
+3
arch/x86/kernel/cpu/resctrl/monitor.c
··· 360 360 struct list_head *head; 361 361 struct rdtgroup *entry; 362 362 363 + if (!is_mbm_local_enabled()) 364 + return; 365 + 363 366 r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; 364 367 closid = rgrp->closid; 365 368 rmid = rgrp->mon.rmid;
+6 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 2534 2534 if (closid_allocated(i) && i != closid) { 2535 2535 mode = rdtgroup_mode_by_closid(i); 2536 2536 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2537 - break; 2537 + /* 2538 + * ctrl values for locksetup aren't relevant 2539 + * until the schemata is written, and the mode 2540 + * becomes RDT_MODE_PSEUDO_LOCKED. 2541 + */ 2542 + continue; 2538 2543 /* 2539 2544 * If CDP is active include peer domain's 2540 2545 * usage to ensure there is no overlap
+1 -1
arch/x86/kernel/fpu/core.c
··· 102 102 103 103 kernel_fpu_disable(); 104 104 105 - if (current->mm) { 105 + if (!(current->flags & PF_KTHREAD)) { 106 106 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { 107 107 set_thread_flag(TIF_NEED_FPU_LOAD); 108 108 /*
+7 -9
arch/x86/kernel/fpu/signal.c
··· 5 5 6 6 #include <linux/compat.h> 7 7 #include <linux/cpu.h> 8 + #include <linux/pagemap.h> 8 9 9 10 #include <asm/fpu/internal.h> 10 11 #include <asm/fpu/signal.h> ··· 61 60 struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; 62 61 struct user_i387_ia32_struct env; 63 62 struct _fpstate_32 __user *fp = buf; 63 + 64 + fpregs_lock(); 65 + if (!test_thread_flag(TIF_NEED_FPU_LOAD)) 66 + copy_fxregs_to_kernel(&tsk->thread.fpu); 67 + fpregs_unlock(); 64 68 65 69 convert_from_fxsr(&env, tsk); 66 70 ··· 195 189 fpregs_unlock(); 196 190 197 191 if (ret) { 198 - int aligned_size; 199 - int nr_pages; 200 - 201 - aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size; 202 - nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE); 203 - 204 - ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages, 205 - NULL, FOLL_WRITE); 206 - if (ret == nr_pages) 192 + if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size)) 207 193 goto retry; 208 194 return -EFAULT; 209 195 }
+1 -1
arch/x86/kernel/kgdb.c
··· 758 758 BREAK_INSTR_SIZE); 759 759 bpt->type = BP_POKE_BREAKPOINT; 760 760 761 - return err; 761 + return 0; 762 762 } 763 763 764 764 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+1 -1
arch/x86/mm/kasan_init_64.c
··· 199 199 if (!pgtable_l5_enabled()) 200 200 return (p4d_t *)pgd; 201 201 202 - p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; 202 + p4d = pgd_val(*pgd) & PTE_PFN_MASK; 203 203 p4d += __START_KERNEL_map - phys_base; 204 204 return (p4d_t *)p4d + p4d_index(addr); 205 205 }
+10 -1
arch/x86/mm/kaslr.c
··· 52 52 } kaslr_regions[] = { 53 53 { &page_offset_base, 0 }, 54 54 { &vmalloc_base, 0 }, 55 - { &vmemmap_base, 1 }, 55 + { &vmemmap_base, 0 }, 56 56 }; 57 57 58 58 /* Get size in bytes used by the memory region */ ··· 78 78 unsigned long rand, memory_tb; 79 79 struct rnd_state rand_state; 80 80 unsigned long remain_entropy; 81 + unsigned long vmemmap_size; 81 82 82 83 vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; 83 84 vaddr = vaddr_start; ··· 109 108 /* Adapt phyiscal memory region size based on available memory */ 110 109 if (memory_tb < kaslr_regions[0].size_tb) 111 110 kaslr_regions[0].size_tb = memory_tb; 111 + 112 + /* 113 + * Calculate the vmemmap region size in TBs, aligned to a TB 114 + * boundary. 115 + */ 116 + vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) * 117 + sizeof(struct page); 118 + kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT); 112 119 113 120 /* Calculate entropy available between regions */ 114 121 remain_entropy = vaddr_end - vaddr_start;
+1
include/linux/cpuhotplug.h
··· 101 101 CPUHP_AP_IRQ_BCM2836_STARTING, 102 102 CPUHP_AP_IRQ_MIPS_GIC_STARTING, 103 103 CPUHP_AP_ARM_MVEBU_COHERENCY, 104 + CPUHP_AP_MICROCODE_LOADER, 104 105 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 105 106 CPUHP_AP_PERF_X86_STARTING, 106 107 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+8 -6
mm/vmalloc.c
··· 2123 2123 /* Handle removing and resetting vm mappings related to the vm_struct. */ 2124 2124 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2125 2125 { 2126 - unsigned long addr = (unsigned long)area->addr; 2127 2126 unsigned long start = ULONG_MAX, end = 0; 2128 2127 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2128 + int flush_dmap = 0; 2129 2129 int i; 2130 2130 2131 2131 /* ··· 2135 2135 * execute permissions, without leaving a RW+X window. 2136 2136 */ 2137 2137 if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 2138 - set_memory_nx(addr, area->nr_pages); 2139 - set_memory_rw(addr, area->nr_pages); 2138 + set_memory_nx((unsigned long)area->addr, area->nr_pages); 2139 + set_memory_rw((unsigned long)area->addr, area->nr_pages); 2140 2140 } 2141 2141 2142 2142 remove_vm_area(area->addr); ··· 2160 2160 * the vm_unmap_aliases() flush includes the direct map. 2161 2161 */ 2162 2162 for (i = 0; i < area->nr_pages; i++) { 2163 - if (page_address(area->pages[i])) { 2163 + unsigned long addr = (unsigned long)page_address(area->pages[i]); 2164 + if (addr) { 2164 2165 start = min(addr, start); 2165 - end = max(addr, end); 2166 + end = max(addr + PAGE_SIZE, end); 2167 + flush_dmap = 1; 2166 2168 } 2167 2169 } 2168 2170 ··· 2174 2172 * reset the direct map permissions to the default. 2175 2173 */ 2176 2174 set_area_direct_map(area, set_direct_map_invalid_noflush); 2177 - _vm_unmap_aliases(start, end, 1); 2175 + _vm_unmap_aliases(start, end, flush_dmap); 2178 2176 set_area_direct_map(area, set_direct_map_default_noflush); 2179 2177 } 2180 2178