Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A set of fixes for x86:

- Fix the swapped outb() parameters in the KASLR code

 - Fix the PKEY handling at fork(), which failed to preserve the pkey
 state for the child. Comes with a test case to validate that.

- Fix the entry stack handling for XEN PV to respect that XEN PV
systems enter the function already on the current thread stack and
not on the trampoline.

- Fix kexec load failure caused by using a stale value when the
kexec_buf structure is reused for subsequent allocations.

- Fix a bogus sizeof() in the memory encryption code

- Enforce PCI dependency for the Intel Low Power Subsystem

- Enforce PCI_LOCKLESS_CONFIG when PCI is enabled"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/Kconfig: Select PCI_LOCKLESS_CONFIG if PCI is enabled
x86/entry/64/compat: Fix stack switching for XEN PV
x86/kexec: Fix a kexec_file_load() failure
x86/mm/mem_encrypt: Fix erroneous sizeof()
x86/selftests/pkeys: Fork() to check for state being preserved
x86/pkeys: Properly copy pkey state at fork()
x86/kaslr: Fix incorrect i8254 outb() parameters
x86/intel/lpss: Make PCI dependency explicit

+61 -17
+1 -1
arch/x86/Kconfig
···
198 198   select IRQ_FORCED_THREADING
199 199   select NEED_SG_DMA_LENGTH
200 200   select PCI_DOMAINS if PCI
201     - select PCI_LOCKLESS_CONFIG
    201 + select PCI_LOCKLESS_CONFIG if PCI
202 202   select PERF_EVENTS
203 203   select RTC_LIB
204 204   select RTC_MC146818_LIB
+4 -2
arch/x86/entry/entry_64_compat.S
···
361 361
362 362   /* Need to switch before accessing the thread stack. */
363 363   SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
364     - movq %rsp, %rdi
    364 + /* In the Xen PV case we already run on the thread stack. */
    365 + ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
365 366   movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
366 367
367 368   pushq 6*8(%rdi) /* regs->ss */
···
371 370   pushq 3*8(%rdi) /* regs->cs */
372 371   pushq 2*8(%rdi) /* regs->ip */
373 372   pushq 1*8(%rdi) /* regs->orig_ax */
374     -
375 373   pushq (%rdi)    /* pt_regs->di */
    374 + .Lint80_keep_stack:
    375 +
376 376   pushq %rsi      /* pt_regs->si */
377 377   xorl %esi, %esi /* nospec si */
378 378   pushq %rdx      /* pt_regs->dx */
+18
arch/x86/include/asm/mmu_context.h
···
178 178
179 179   void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
180 180
    181 + /*
    182 +  * Init a new mm. Used on mm copies, like at fork()
    183 +  * and on mm's that are brand-new, like at execve().
    184 +  */
181 185   static inline int init_new_context(struct task_struct *tsk,
182 186                                      struct mm_struct *mm)
183 187   {
···
232 228   } while (0)
233 229   #endif
234 230
    231 + static inline void arch_dup_pkeys(struct mm_struct *oldmm,
    232 +                                   struct mm_struct *mm)
    233 + {
    234 + #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
    235 +         if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
    236 +                 return;
    237 +
    238 +         /* Duplicate the oldmm pkey state in mm: */
    239 +         mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
    240 +         mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
    241 + #endif
    242 + }
    243 +
235 244   static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
236 245   {
    246 +         arch_dup_pkeys(oldmm, mm);
237 247           paravirt_arch_dup_mmap(oldmm, mm);
238 248           return ldt_dup_context(oldmm, mm);
239 249   }
+1
arch/x86/kernel/crash.c
···
470 470
471 471   kbuf.memsz = kbuf.bufsz;
472 472   kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
    473 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
473 474   ret = kexec_add_buffer(&kbuf);
474 475   if (ret) {
475 476           vfree((void *)image->arch.elf_headers);
+2
arch/x86/kernel/kexec-bzimage64.c
···
434 434   kbuf.memsz = PAGE_ALIGN(header->init_size);
435 435   kbuf.buf_align = header->kernel_alignment;
436 436   kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
    437 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
437 438   ret = kexec_add_buffer(&kbuf);
438 439   if (ret)
439 440           goto out_free_params;
···
449 448   kbuf.bufsz = kbuf.memsz = initrd_len;
450 449   kbuf.buf_align = PAGE_SIZE;
451 450   kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
    451 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
452 452   ret = kexec_add_buffer(&kbuf);
453 453   if (ret)
454 454           goto out_free_params;
+2 -2
arch/x86/lib/kaslr.c
···
36 36   u16 status, timer;
37 37
38 38   do {
39    -         outb(I8254_PORT_CONTROL,
40    -              I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
   39 +         outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
   40 +              I8254_PORT_CONTROL);
41 41           status = inb(I8254_PORT_COUNTER0);
42 42           timer = inb(I8254_PORT_COUNTER0);
43 43           timer |= inb(I8254_PORT_COUNTER0) << 8;
+2 -2
arch/x86/mm/mem_encrypt_identity.c
···
158 158   pmd = pmd_offset(pud, ppd->vaddr);
159 159   if (pmd_none(*pmd)) {
160 160           pte = ppd->pgtable_area;
161     -         memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
162     -         ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
    161 +         memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
    162 +         ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
163 163           set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
164 164   }
165 165
+31 -10
tools/testing/selftests/x86/protection_keys.c
···
1133 1133   pkey_assert(err);
1134 1134   }
1135 1135
     1136 + void become_child(void)
     1137 + {
     1138 +         pid_t forkret;
     1139 +
     1140 +         forkret = fork();
     1141 +         pkey_assert(forkret >= 0);
     1142 +         dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
     1143 +
     1144 +         if (!forkret) {
     1145 +                 /* in the child */
     1146 +                 return;
     1147 +         }
     1148 +         exit(0);
     1149 + }
     1150 +
1136 1151   /* Assumes that all pkeys other than 'pkey' are unallocated */
1137 1152   void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1138 1153   {
···
1156 1141   int nr_allocated_pkeys = 0;
1157 1142   int i;
1158 1143
1159      -  for (i = 0; i < NR_PKEYS*2; i++) {
     1144 +  for (i = 0; i < NR_PKEYS*3; i++) {
1160 1145           int new_pkey;
1161 1146           dprintf1("%s() alloc loop: %d\n", __func__, i);
1162 1147           new_pkey = alloc_pkey();
···
1167 1152   if ((new_pkey == -1) && (errno == ENOSPC)) {
1168 1153           dprintf2("%s() failed to allocate pkey after %d tries\n",
1169 1154                    __func__, nr_allocated_pkeys);
1170      -          break;
     1155 +  } else {
     1156 +          /*
     1157 +           * Ensure the number of successes never
     1158 +           * exceeds the number of keys supported
     1159 +           * in the hardware.
     1160 +           */
     1161 +          pkey_assert(nr_allocated_pkeys < NR_PKEYS);
     1162 +          allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
1171 1163   }
1172      -  pkey_assert(nr_allocated_pkeys < NR_PKEYS);
1173      -  allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
     1164 +
     1165 +  /*
     1166 +   * Make sure that allocation state is properly
     1167 +   * preserved across fork().
     1168 +   */
     1169 +  if (i == NR_PKEYS*2)
     1170 +          become_child();
1174 1171   }
1175 1172
1176 1173   dprintf3("%s()::%d\n", __func__, __LINE__);
1177      -
1178      -  /*
1179      -   * ensure it did not reach the end of the loop without
1180      -   * failure:
1181      -   */
1182      -  pkey_assert(i < NR_PKEYS*2);
1183 1174
1184 1175   /*
1185 1176   * There are 16 pkeys supported in hardware. Three are