Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:
- topology enumeration fixes
- KASAN fix
- two entry fixes (not yet the big series related to KASLR)
- remove obsolete code
- instruction decoder fix
- better /dev/mem sanity checks, hopefully working better this time
- pkeys fixes
- two ACPI fixes
- 5-level paging related fixes
- UMIP fixes that should make application visible faults more debuggable
- boot fix for weird virtualization environment

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
x86/decoder: Add new TEST instruction pattern
x86/PCI: Remove unused HyperTransport interrupt support
x86/umip: Fix insn_get_code_seg_params()'s return value
x86/boot/KASLR: Remove unused variable
x86/entry/64: Add missing irqflags tracing to native_load_gs_index()
x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow
x86/entry/64: Fix entry_SYSCALL_64_after_hwframe() IRQ tracing
x86/pkeys/selftests: Fix protection keys write() warning
x86/pkeys/selftests: Rename 'si_pkey' to 'siginfo_pkey'
x86/mpx/selftests: Fix up weird arrays
x86/pkeys: Update documentation about availability
x86/umip: Print a warning into the syslog if UMIP-protected instructions are used
x86/smpboot: Fix __max_logical_packages estimate
x86/topology: Avoid wasting 128k for package id array
perf/x86/intel/uncore: Cache logical pkg id in uncore driver
x86/acpi: Reduce code duplication in mp_override_legacy_irq()
x86/acpi: Handle SCI interrupts above legacy space gracefully
x86/boot: Fix boot failure when SMP MP-table is based at 0
x86/mm: Limit mmap() of /dev/mem to valid physical addresses
x86/selftests: Add test for mapping placement for 5-level paging
...

+476 -620
+7 -2
Documentation/x86/protection-keys.txt
··· 1 - Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature 2 - which will be found on future Intel CPUs. 1 + Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature 2 + which is found on Intel's Skylake "Scalable Processor" Server CPUs. 3 + It will be available in future non-server parts. 4 + 5 + For anyone wishing to test or use this feature, it is available in 6 + Amazon's EC2 C5 instances and is known to work there using an Ubuntu 7 + 17.04 image. 3 8 4 9 Memory Protection Keys provides a mechanism for enforcing page-based 5 10 protections, but without requiring modification of the page tables
+9 -3
arch/x86/Kconfig
··· 1804 1804 If unsure, say Y. 1805 1805 1806 1806 config X86_INTEL_UMIP 1807 - def_bool n 1807 + def_bool y 1808 1808 depends on CPU_SUP_INTEL 1809 1809 prompt "Intel User Mode Instruction Prevention" if EXPERT 1810 1810 ---help--- 1811 1811 The User Mode Instruction Prevention (UMIP) is a security 1812 1812 feature in newer Intel processors. If enabled, a general 1813 - protection fault is issued if the instructions SGDT, SLDT, 1814 - SIDT, SMSW and STR are executed in user mode. 1813 + protection fault is issued if the SGDT, SLDT, SIDT, SMSW 1814 + or STR instructions are executed in user mode. These instructions 1815 + unnecessarily expose information about the hardware state. 1816 + 1817 + The vast majority of applications do not use these instructions. 1818 + For the very few that do, software emulation is provided in 1819 + specific cases in protected and virtual-8086 modes. Emulated 1820 + results are dummy. 1815 1821 1816 1822 config X86_INTEL_MPX 1817 1823 prompt "Intel MPX (Memory Protection Extensions)"
+2 -3
arch/x86/boot/compressed/kaslr.c
··· 171 171 static void mem_avoid_memmap(char *str) 172 172 { 173 173 static int i; 174 - int rc; 175 174 176 175 if (i >= MAX_MEMMAP_REGIONS) 177 176 return; ··· 218 219 return 0; 219 220 220 221 tmp_cmdline = malloc(len + 1); 221 - if (!tmp_cmdline ) 222 + if (!tmp_cmdline) 222 223 error("Failed to allocate space for tmp_cmdline"); 223 224 224 225 memcpy(tmp_cmdline, args, len); ··· 362 363 cmd_line |= boot_params->hdr.cmd_line_ptr; 363 364 /* Calculate size of cmd_line. */ 364 365 ptr = (char *)(unsigned long)cmd_line; 365 - for (cmd_line_size = 0; ptr[cmd_line_size++]; ) 366 + for (cmd_line_size = 0; ptr[cmd_line_size++];) 366 367 ; 367 368 mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line; 368 369 mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
+10 -4
arch/x86/entry/entry_64.S
··· 51 51 END(native_usergs_sysret64) 52 52 #endif /* CONFIG_PARAVIRT */ 53 53 54 - .macro TRACE_IRQS_IRETQ 54 + .macro TRACE_IRQS_FLAGS flags:req 55 55 #ifdef CONFIG_TRACE_IRQFLAGS 56 - bt $9, EFLAGS(%rsp) /* interrupts off? */ 56 + bt $9, \flags /* interrupts off? */ 57 57 jnc 1f 58 58 TRACE_IRQS_ON 59 59 1: 60 60 #endif 61 + .endm 62 + 63 + .macro TRACE_IRQS_IRETQ 64 + TRACE_IRQS_FLAGS EFLAGS(%rsp) 61 65 .endm 62 66 63 67 /* ··· 152 148 movq %rsp, PER_CPU_VAR(rsp_scratch) 153 149 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 154 150 155 - TRACE_IRQS_OFF 156 - 157 151 /* Construct struct pt_regs on stack */ 158 152 pushq $__USER_DS /* pt_regs->ss */ 159 153 pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ ··· 171 169 pushq %r11 /* pt_regs->r11 */ 172 170 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ 173 171 UNWIND_HINT_REGS extra=0 172 + 173 + TRACE_IRQS_OFF 174 174 175 175 /* 176 176 * If we need to do entry work or if we guess we'll need to do ··· 947 943 FRAME_BEGIN 948 944 pushfq 949 945 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) 946 + TRACE_IRQS_OFF 950 947 SWAPGS 951 948 .Lgs_change: 952 949 movl %edi, %gs 953 950 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE 954 951 SWAPGS 952 + TRACE_IRQS_FLAGS (%rsp) 955 953 popfq 956 954 FRAME_END 957 955 ret
+2 -2
arch/x86/events/intel/uncore.c
··· 975 975 int i, phys_id, pkg; 976 976 977 977 phys_id = uncore_pcibus_to_physid(pdev->bus); 978 - pkg = topology_phys_to_logical_pkg(phys_id); 979 978 980 979 box = pci_get_drvdata(pdev); 981 980 if (!box) { 981 + pkg = topology_phys_to_logical_pkg(phys_id); 982 982 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { 983 983 if (uncore_extra_pci_dev[pkg].dev[i] == pdev) { 984 984 uncore_extra_pci_dev[pkg].dev[i] = NULL; ··· 994 994 return; 995 995 996 996 pci_set_drvdata(pdev, NULL); 997 - pmu->boxes[pkg] = NULL; 997 + pmu->boxes[box->pkgid] = NULL; 998 998 if (atomic_dec_return(&pmu->activeboxes) == 0) 999 999 uncore_pmu_unregister(pmu); 1000 1000 uncore_box_exit(box);
+1 -1
arch/x86/events/intel/uncore.h
··· 100 100 101 101 struct intel_uncore_box { 102 102 int pci_phys_id; 103 - int pkgid; 103 + int pkgid; /* Logical package ID */ 104 104 int n_active; /* number of active events */ 105 105 int n_events; 106 106 int cpu; /* cpu to collect events */
+1 -1
arch/x86/events/intel/uncore_snbep.c
··· 1057 1057 1058 1058 if (reg1->idx != EXTRA_REG_NONE) { 1059 1059 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; 1060 - int pkg = topology_phys_to_logical_pkg(box->pci_phys_id); 1060 + int pkg = box->pkgid; 1061 1061 struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx]; 1062 1062 1063 1063 if (filter_pdev) {
+1
arch/x86/include/asm/elf.h
··· 309 309 extern unsigned long task_size_32bit(void); 310 310 extern unsigned long task_size_64bit(int full_addr_space); 311 311 extern unsigned long get_mmap_base(int is_legacy); 312 + extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); 312 313 313 314 #ifdef CONFIG_X86_32 314 315
-8
arch/x86/include/asm/hw_irq.h
··· 99 99 void *dmar_data; 100 100 }; 101 101 #endif 102 - #ifdef CONFIG_HT_IRQ 103 - struct { 104 - int ht_pos; 105 - int ht_idx; 106 - struct pci_dev *ht_dev; 107 - void *ht_update; 108 - }; 109 - #endif 110 102 #ifdef CONFIG_X86_UV 111 103 struct { 112 104 int uv_limit;
-46
arch/x86/include/asm/hypertransport.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_X86_HYPERTRANSPORT_H 3 - #define _ASM_X86_HYPERTRANSPORT_H 4 - 5 - /* 6 - * Constants for x86 Hypertransport Interrupts. 7 - */ 8 - 9 - #define HT_IRQ_LOW_BASE 0xf8000000 10 - 11 - #define HT_IRQ_LOW_VECTOR_SHIFT 16 12 - #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 13 - #define HT_IRQ_LOW_VECTOR(v) \ 14 - (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) 15 - 16 - #define HT_IRQ_LOW_DEST_ID_SHIFT 8 17 - #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 18 - #define HT_IRQ_LOW_DEST_ID(v) \ 19 - (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) 20 - 21 - #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 22 - #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 23 - 24 - #define HT_IRQ_LOW_RQEOI_EDGE 0x0000000 25 - #define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020 26 - 27 - 28 - #define HT_IRQ_LOW_MT_FIXED 0x0000000 29 - #define HT_IRQ_LOW_MT_ARBITRATED 0x0000004 30 - #define HT_IRQ_LOW_MT_SMI 0x0000008 31 - #define HT_IRQ_LOW_MT_NMI 0x000000c 32 - #define HT_IRQ_LOW_MT_INIT 0x0000010 33 - #define HT_IRQ_LOW_MT_STARTUP 0x0000014 34 - #define HT_IRQ_LOW_MT_EXTINT 0x0000018 35 - #define HT_IRQ_LOW_MT_LINT1 0x000008c 36 - #define HT_IRQ_LOW_MT_LINT0 0x0000098 37 - 38 - #define HT_IRQ_LOW_IRQ_MASKED 0x0000001 39 - 40 - 41 - #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 42 - #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff 43 - #define HT_IRQ_HIGH_DEST_ID(v) \ 44 - ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) 45 - 46 - #endif /* _ASM_X86_HYPERTRANSPORT_H */
+1 -1
arch/x86/include/asm/insn-eval.h
··· 18 18 void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); 19 19 int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); 20 20 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); 21 - char insn_get_code_seg_params(struct pt_regs *regs); 21 + int insn_get_code_seg_params(struct pt_regs *regs); 22 22 23 23 #endif /* _ASM_X86_INSN_EVAL_H */
+4
arch/x86/include/asm/io.h
··· 111 111 112 112 #endif 113 113 114 + #define ARCH_HAS_VALID_PHYS_ADDR_RANGE 115 + extern int valid_phys_addr_range(phys_addr_t addr, size_t size); 116 + extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); 117 + 114 118 /** 115 119 * virt_to_phys - map virtual addresses to physical 116 120 * @address: address to remap
-6
arch/x86/include/asm/irqdomain.h
··· 56 56 static inline void arch_init_msi_domain(struct irq_domain *domain) { } 57 57 #endif 58 58 59 - #ifdef CONFIG_HT_IRQ 60 - extern void arch_init_htirq_domain(struct irq_domain *domain); 61 - #else 62 - static inline void arch_init_htirq_domain(struct irq_domain *domain) { } 63 - #endif 64 - 65 59 #endif
+1
arch/x86/include/asm/processor.h
··· 132 132 /* Index into per_cpu list: */ 133 133 u16 cpu_index; 134 134 u32 microcode; 135 + unsigned initialized : 1; 135 136 } __randomize_layout; 136 137 137 138 struct cpuid_regs {
+38 -23
arch/x86/kernel/acpi/boot.c
··· 342 342 #ifdef CONFIG_X86_IO_APIC 343 343 #define MP_ISA_BUS 0 344 344 345 + static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, 346 + u8 trigger, u32 gsi); 347 + 345 348 static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, 346 349 u32 gsi) 347 350 { 348 - int ioapic; 349 - int pin; 350 - struct mpc_intsrc mp_irq; 351 - 352 351 /* 353 352 * Check bus_irq boundary. 354 353 */ ··· 357 358 } 358 359 359 360 /* 360 - * Convert 'gsi' to 'ioapic.pin'. 361 - */ 362 - ioapic = mp_find_ioapic(gsi); 363 - if (ioapic < 0) 364 - return; 365 - pin = mp_find_ioapic_pin(ioapic, gsi); 366 - 367 - /* 368 361 * TBD: This check is for faulty timer entries, where the override 369 362 * erroneously sets the trigger to level, resulting in a HUGE 370 363 * increase of timer interrupts! ··· 364 373 if ((bus_irq == 0) && (trigger == 3)) 365 374 trigger = 1; 366 375 367 - mp_irq.type = MP_INTSRC; 368 - mp_irq.irqtype = mp_INT; 369 - mp_irq.irqflag = (trigger << 2) | polarity; 370 - mp_irq.srcbus = MP_ISA_BUS; 371 - mp_irq.srcbusirq = bus_irq; /* IRQ */ 372 - mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ 373 - mp_irq.dstirq = pin; /* INTIN# */ 374 - 375 - mp_save_irq(&mp_irq); 376 - 376 + if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) 377 + return; 377 378 /* 378 379 * Reset default identity mapping if gsi is also an legacy IRQ, 379 380 * otherwise there will be more than one entry with the same GSI ··· 409 426 410 427 mp_save_irq(&mp_irq); 411 428 #endif 429 + return 0; 430 + } 431 + 432 + static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, 433 + u8 trigger, u32 gsi) 434 + { 435 + struct mpc_intsrc mp_irq; 436 + int ioapic, pin; 437 + 438 + /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ 439 + ioapic = mp_find_ioapic(gsi); 440 + if (ioapic < 0) { 441 + pr_warn("Failed to find ioapic for gsi : %u\n", gsi); 442 + return ioapic; 443 + } 444 + 445 + pin = mp_find_ioapic_pin(ioapic, gsi); 446 + 447 + mp_irq.type = 
MP_INTSRC; 448 + mp_irq.irqtype = mp_INT; 449 + mp_irq.irqflag = (trigger << 2) | polarity; 450 + mp_irq.srcbus = MP_ISA_BUS; 451 + mp_irq.srcbusirq = bus_irq; 452 + mp_irq.dstapic = mpc_ioapic_id(ioapic); 453 + mp_irq.dstirq = pin; 454 + 455 + mp_save_irq(&mp_irq); 456 + 412 457 return 0; 413 458 } 414 459 ··· 484 473 if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) 485 474 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; 486 475 487 - mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); 476 + if (bus_irq < NR_IRQS_LEGACY) 477 + mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); 478 + else 479 + mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); 480 + 488 481 acpi_penalize_sci_irq(bus_irq, trigger, polarity); 489 482 490 483 /*
-1
arch/x86/kernel/apic/Makefile
··· 12 12 13 13 obj-$(CONFIG_X86_IO_APIC) += io_apic.o 14 14 obj-$(CONFIG_PCI_MSI) += msi.o 15 - obj-$(CONFIG_HT_IRQ) += htirq.o 16 15 obj-$(CONFIG_SMP) += ipi.o 17 16 18 17 ifeq ($(CONFIG_X86_64),y)
-198
arch/x86/kernel/apic/htirq.c
··· 1 - /* 2 - * Support Hypertransport IRQ 3 - * 4 - * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo 5 - * Moved from arch/x86/kernel/apic/io_apic.c. 6 - * Jiang Liu <jiang.liu@linux.intel.com> 7 - * Add support of hierarchical irqdomain 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License version 2 as 11 - * published by the Free Software Foundation. 12 - */ 13 - #include <linux/mm.h> 14 - #include <linux/interrupt.h> 15 - #include <linux/init.h> 16 - #include <linux/device.h> 17 - #include <linux/pci.h> 18 - #include <linux/htirq.h> 19 - #include <asm/irqdomain.h> 20 - #include <asm/hw_irq.h> 21 - #include <asm/apic.h> 22 - #include <asm/hypertransport.h> 23 - 24 - static struct irq_domain *htirq_domain; 25 - 26 - /* 27 - * Hypertransport interrupt support 28 - */ 29 - static int 30 - ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 31 - { 32 - struct irq_data *parent = data->parent_data; 33 - int ret; 34 - 35 - ret = parent->chip->irq_set_affinity(parent, mask, force); 36 - if (ret >= 0) { 37 - struct ht_irq_msg msg; 38 - struct irq_cfg *cfg = irqd_cfg(data); 39 - 40 - fetch_ht_irq_msg(data->irq, &msg); 41 - msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | 42 - HT_IRQ_LOW_DEST_ID_MASK); 43 - msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) | 44 - HT_IRQ_LOW_DEST_ID(cfg->dest_apicid); 45 - msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 46 - msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid); 47 - write_ht_irq_msg(data->irq, &msg); 48 - } 49 - 50 - return ret; 51 - } 52 - 53 - static struct irq_chip ht_irq_chip = { 54 - .name = "PCI-HT", 55 - .irq_mask = mask_ht_irq, 56 - .irq_unmask = unmask_ht_irq, 57 - .irq_ack = irq_chip_ack_parent, 58 - .irq_set_affinity = ht_set_affinity, 59 - .irq_retrigger = irq_chip_retrigger_hierarchy, 60 - .flags = IRQCHIP_SKIP_SET_WAKE, 61 - }; 62 - 63 - static int htirq_domain_alloc(struct 
irq_domain *domain, unsigned int virq, 64 - unsigned int nr_irqs, void *arg) 65 - { 66 - struct ht_irq_cfg *ht_cfg; 67 - struct irq_alloc_info *info = arg; 68 - struct pci_dev *dev; 69 - irq_hw_number_t hwirq; 70 - int ret; 71 - 72 - if (nr_irqs > 1 || !info) 73 - return -EINVAL; 74 - 75 - dev = info->ht_dev; 76 - hwirq = (info->ht_idx & 0xFF) | 77 - PCI_DEVID(dev->bus->number, dev->devfn) << 8 | 78 - (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24; 79 - if (irq_find_mapping(domain, hwirq) > 0) 80 - return -EEXIST; 81 - 82 - ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL); 83 - if (!ht_cfg) 84 - return -ENOMEM; 85 - 86 - ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); 87 - if (ret < 0) { 88 - kfree(ht_cfg); 89 - return ret; 90 - } 91 - 92 - /* Initialize msg to a value that will never match the first write. */ 93 - ht_cfg->msg.address_lo = 0xffffffff; 94 - ht_cfg->msg.address_hi = 0xffffffff; 95 - ht_cfg->dev = info->ht_dev; 96 - ht_cfg->update = info->ht_update; 97 - ht_cfg->pos = info->ht_pos; 98 - ht_cfg->idx = 0x10 + (info->ht_idx * 2); 99 - irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg, 100 - handle_edge_irq, ht_cfg, "edge"); 101 - 102 - return 0; 103 - } 104 - 105 - static void htirq_domain_free(struct irq_domain *domain, unsigned int virq, 106 - unsigned int nr_irqs) 107 - { 108 - struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); 109 - 110 - BUG_ON(nr_irqs != 1); 111 - kfree(irq_data->chip_data); 112 - irq_domain_free_irqs_top(domain, virq, nr_irqs); 113 - } 114 - 115 - static int htirq_domain_activate(struct irq_domain *domain, 116 - struct irq_data *irq_data, bool early) 117 - { 118 - struct ht_irq_msg msg; 119 - struct irq_cfg *cfg = irqd_cfg(irq_data); 120 - 121 - msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid); 122 - msg.address_lo = 123 - HT_IRQ_LOW_BASE | 124 - HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) | 125 - HT_IRQ_LOW_VECTOR(cfg->vector) | 126 - ((apic->irq_dest_mode == 0) ? 
127 - HT_IRQ_LOW_DM_PHYSICAL : 128 - HT_IRQ_LOW_DM_LOGICAL) | 129 - HT_IRQ_LOW_RQEOI_EDGE | 130 - ((apic->irq_delivery_mode != dest_LowestPrio) ? 131 - HT_IRQ_LOW_MT_FIXED : 132 - HT_IRQ_LOW_MT_ARBITRATED) | 133 - HT_IRQ_LOW_IRQ_MASKED; 134 - write_ht_irq_msg(irq_data->irq, &msg); 135 - return 0; 136 - } 137 - 138 - static void htirq_domain_deactivate(struct irq_domain *domain, 139 - struct irq_data *irq_data) 140 - { 141 - struct ht_irq_msg msg; 142 - 143 - memset(&msg, 0, sizeof(msg)); 144 - write_ht_irq_msg(irq_data->irq, &msg); 145 - } 146 - 147 - static const struct irq_domain_ops htirq_domain_ops = { 148 - .alloc = htirq_domain_alloc, 149 - .free = htirq_domain_free, 150 - .activate = htirq_domain_activate, 151 - .deactivate = htirq_domain_deactivate, 152 - }; 153 - 154 - void __init arch_init_htirq_domain(struct irq_domain *parent) 155 - { 156 - struct fwnode_handle *fn; 157 - 158 - if (disable_apic) 159 - return; 160 - 161 - fn = irq_domain_alloc_named_fwnode("PCI-HT"); 162 - if (!fn) 163 - goto warn; 164 - 165 - htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL); 166 - irq_domain_free_fwnode(fn); 167 - if (!htirq_domain) 168 - goto warn; 169 - 170 - htirq_domain->parent = parent; 171 - return; 172 - 173 - warn: 174 - pr_warn("Failed to initialize irqdomain for HTIRQ.\n"); 175 - } 176 - 177 - int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev, 178 - ht_irq_update_t *update) 179 - { 180 - struct irq_alloc_info info; 181 - 182 - if (!htirq_domain) 183 - return -ENOSYS; 184 - 185 - init_irq_alloc_info(&info, NULL); 186 - info.ht_idx = idx; 187 - info.ht_pos = pos; 188 - info.ht_dev = dev; 189 - info.ht_update = update; 190 - 191 - return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev), 192 - &info); 193 - } 194 - 195 - void arch_teardown_ht_irq(unsigned int irq) 196 - { 197 - irq_domain_free_irqs(irq, 1); 198 - }
+2 -3
arch/x86/kernel/apic/vector.c
··· 1 1 /* 2 - * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc. 2 + * Local APIC related interfaces to support IOAPIC, MSI, etc. 3 3 * 4 4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo 5 5 * Moved from arch/x86/kernel/apic/io_apic.c. ··· 601 601 nr_irqs = NR_VECTORS * nr_cpu_ids; 602 602 603 603 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; 604 - #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 604 + #if defined(CONFIG_PCI_MSI) 605 605 /* 606 606 * for MSI and HT dyn irq 607 607 */ ··· 663 663 irq_set_default_host(x86_vector_domain); 664 664 665 665 arch_init_msi_domain(x86_vector_domain); 666 - arch_init_htirq_domain(x86_vector_domain); 667 666 668 667 BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); 669 668
+2
arch/x86/kernel/cpu/common.c
··· 341 341 342 342 cr4_set_bits(X86_CR4_UMIP); 343 343 344 + pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n"); 345 + 344 346 return; 345 347 346 348 out:
+4 -2
arch/x86/kernel/mpparse.c
··· 431 431 } 432 432 433 433 static unsigned long mpf_base; 434 + static bool mpf_found; 434 435 435 436 static unsigned long __init get_mpc_size(unsigned long physptr) 436 437 { ··· 505 504 if (!smp_found_config) 506 505 return; 507 506 508 - if (!mpf_base) 507 + if (!mpf_found) 509 508 return; 510 509 511 510 if (acpi_lapic && early) ··· 594 593 smp_found_config = 1; 595 594 #endif 596 595 mpf_base = base; 596 + mpf_found = true; 597 597 598 598 pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", 599 599 base, base + sizeof(*mpf) - 1, mpf); ··· 860 858 if (!enable_update_mptable) 861 859 return 0; 862 860 863 - if (!mpf_base) 861 + if (!mpf_found) 864 862 return 0; 865 863 866 864 mpf = early_memremap(mpf_base, sizeof(*mpf));
+43 -93
arch/x86/kernel/smpboot.c
··· 101 101 EXPORT_PER_CPU_SYMBOL(cpu_info); 102 102 103 103 /* Logical package management. We might want to allocate that dynamically */ 104 - static int *physical_to_logical_pkg __read_mostly; 105 - static unsigned long *physical_package_map __read_mostly;; 106 - static unsigned int max_physical_pkg_id __read_mostly; 107 104 unsigned int __max_logical_packages __read_mostly; 108 105 EXPORT_SYMBOL(__max_logical_packages); 109 106 static unsigned int logical_packages __read_mostly; ··· 278 281 } 279 282 280 283 /** 281 - * topology_update_package_map - Update the physical to logical package map 282 - * @pkg: The physical package id as retrieved via CPUID 283 - * @cpu: The cpu for which this is updated 284 - */ 285 - int topology_update_package_map(unsigned int pkg, unsigned int cpu) 286 - { 287 - unsigned int new; 288 - 289 - /* Called from early boot ? */ 290 - if (!physical_package_map) 291 - return 0; 292 - 293 - if (pkg >= max_physical_pkg_id) 294 - return -EINVAL; 295 - 296 - /* Set the logical package id */ 297 - if (test_and_set_bit(pkg, physical_package_map)) 298 - goto found; 299 - 300 - if (logical_packages >= __max_logical_packages) { 301 - pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n", 302 - logical_packages, cpu, __max_logical_packages); 303 - return -ENOSPC; 304 - } 305 - 306 - new = logical_packages++; 307 - if (new != pkg) { 308 - pr_info("CPU %u Converting physical %u to logical package %u\n", 309 - cpu, pkg, new); 310 - } 311 - physical_to_logical_pkg[pkg] = new; 312 - 313 - found: 314 - cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg]; 315 - return 0; 316 - } 317 - 318 - /** 319 284 * topology_phys_to_logical_pkg - Map a physical package id to a logical 320 285 * 321 286 * Returns logical package id or -1 if not found 322 287 */ 323 288 int topology_phys_to_logical_pkg(unsigned int phys_pkg) 324 289 { 325 - if (phys_pkg >= max_physical_pkg_id) 326 - return -1; 327 - return physical_to_logical_pkg[phys_pkg]; 290 + int 
cpu; 291 + 292 + for_each_possible_cpu(cpu) { 293 + struct cpuinfo_x86 *c = &cpu_data(cpu); 294 + 295 + if (c->initialized && c->phys_proc_id == phys_pkg) 296 + return c->logical_proc_id; 297 + } 298 + return -1; 328 299 } 329 300 EXPORT_SYMBOL(topology_phys_to_logical_pkg); 330 301 331 - static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu) 302 + /** 303 + * topology_update_package_map - Update the physical to logical package map 304 + * @pkg: The physical package id as retrieved via CPUID 305 + * @cpu: The cpu for which this is updated 306 + */ 307 + int topology_update_package_map(unsigned int pkg, unsigned int cpu) 332 308 { 333 - unsigned int ncpus; 334 - size_t size; 309 + int new; 335 310 336 - /* 337 - * Today neither Intel nor AMD support heterogenous systems. That 338 - * might change in the future.... 339 - * 340 - * While ideally we'd want '* smp_num_siblings' in the below @ncpus 341 - * computation, this won't actually work since some Intel BIOSes 342 - * report inconsistent HT data when they disable HT. 343 - * 344 - * In particular, they reduce the APIC-IDs to only include the cores, 345 - * but leave the CPUID topology to say there are (2) siblings. 346 - * This means we don't know how many threads there will be until 347 - * after the APIC enumeration. 348 - * 349 - * By not including this we'll sometimes over-estimate the number of 350 - * logical packages by the amount of !present siblings, but this is 351 - * still better than MAX_LOCAL_APIC. 352 - * 353 - * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited 354 - * on the command line leading to a similar issue as the HT disable 355 - * problem because the hyperthreads are usually enumerated after the 356 - * primary cores. 357 - */ 358 - ncpus = boot_cpu_data.x86_max_cores; 359 - if (!ncpus) { 360 - pr_warn("x86_max_cores == zero !?!?"); 361 - ncpus = 1; 311 + /* Already available somewhere? 
*/ 312 + new = topology_phys_to_logical_pkg(pkg); 313 + if (new >= 0) 314 + goto found; 315 + 316 + new = logical_packages++; 317 + if (new != pkg) { 318 + pr_info("CPU %u Converting physical %u to logical package %u\n", 319 + cpu, pkg, new); 362 320 } 363 - 364 - __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); 365 - logical_packages = 0; 366 - 367 - /* 368 - * Possibly larger than what we need as the number of apic ids per 369 - * package can be smaller than the actual used apic ids. 370 - */ 371 - max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus); 372 - size = max_physical_pkg_id * sizeof(unsigned int); 373 - physical_to_logical_pkg = kmalloc(size, GFP_KERNEL); 374 - memset(physical_to_logical_pkg, 0xff, size); 375 - size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long); 376 - physical_package_map = kzalloc(size, GFP_KERNEL); 377 - 378 - pr_info("Max logical packages: %u\n", __max_logical_packages); 379 - 380 - topology_update_package_map(c->phys_proc_id, cpu); 321 + found: 322 + cpu_data(cpu).logical_proc_id = new; 323 + return 0; 381 324 } 382 325 383 326 void __init smp_store_boot_cpu_info(void) ··· 327 390 328 391 *c = boot_cpu_data; 329 392 c->cpu_index = id; 330 - smp_init_package_map(c, id); 393 + topology_update_package_map(c->phys_proc_id, id); 394 + c->initialized = true; 331 395 } 332 396 333 397 /* ··· 339 401 { 340 402 struct cpuinfo_x86 *c = &cpu_data(id); 341 403 342 - *c = boot_cpu_data; 404 + /* Copy boot_cpu_data only on the first bringup */ 405 + if (!c->initialized) 406 + *c = boot_cpu_data; 343 407 c->cpu_index = id; 344 408 /* 345 409 * During boot time, CPU0 has this setup already. Save the info when 346 410 * bringing up AP or offlined CPU0. 
347 411 */ 348 412 identify_secondary_cpu(c); 413 + c->initialized = true; 349 414 } 350 415 351 416 static bool ··· 1297 1356 1298 1357 void __init native_smp_cpus_done(unsigned int max_cpus) 1299 1358 { 1359 + int ncpus; 1360 + 1300 1361 pr_debug("Boot done\n"); 1362 + /* 1363 + * Today neither Intel nor AMD support heterogenous systems so 1364 + * extrapolate the boot cpu's data to all packages. 1365 + */ 1366 + ncpus = cpu_data(0).booted_cores * smp_num_siblings; 1367 + __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); 1368 + pr_info("Max logical packages: %u\n", __max_logical_packages); 1301 1369 1302 1370 if (x86_has_numa_in_package) 1303 1371 set_sched_topology(x86_numa_in_package_topology);
+7 -3
arch/x86/kernel/sys_x86_64.c
··· 188 188 if (len > TASK_SIZE) 189 189 return -ENOMEM; 190 190 191 + /* No address checking. See comment at mmap_address_hint_valid() */ 191 192 if (flags & MAP_FIXED) 192 193 return addr; 193 194 ··· 198 197 199 198 /* requesting a specific address */ 200 199 if (addr) { 201 - addr = PAGE_ALIGN(addr); 200 + addr &= PAGE_MASK; 201 + if (!mmap_address_hint_valid(addr, len)) 202 + goto get_unmapped_area; 203 + 202 204 vma = find_vma(mm, addr); 203 - if (TASK_SIZE - len >= addr && 204 - (!vma || addr + len <= vm_start_gap(vma))) 205 + if (!vma || addr + len <= vm_start_gap(vma)) 205 206 return addr; 206 207 } 208 + get_unmapped_area: 207 209 208 210 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 209 211 info.length = len;
+75 -13
arch/x86/kernel/umip.c
··· 78 78 79 79 #define UMIP_INST_SGDT 0 /* 0F 01 /0 */ 80 80 #define UMIP_INST_SIDT 1 /* 0F 01 /1 */ 81 - #define UMIP_INST_SMSW 3 /* 0F 01 /4 */ 81 + #define UMIP_INST_SMSW 2 /* 0F 01 /4 */ 82 + #define UMIP_INST_SLDT 3 /* 0F 00 /0 */ 83 + #define UMIP_INST_STR 4 /* 0F 00 /1 */ 84 + 85 + const char * const umip_insns[5] = { 86 + [UMIP_INST_SGDT] = "SGDT", 87 + [UMIP_INST_SIDT] = "SIDT", 88 + [UMIP_INST_SMSW] = "SMSW", 89 + [UMIP_INST_SLDT] = "SLDT", 90 + [UMIP_INST_STR] = "STR", 91 + }; 92 + 93 + #define umip_pr_err(regs, fmt, ...) \ 94 + umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__) 95 + #define umip_pr_warning(regs, fmt, ...) \ 96 + umip_printk(regs, KERN_WARNING, fmt, ##__VA_ARGS__) 97 + 98 + /** 99 + * umip_printk() - Print a rate-limited message 100 + * @regs: Register set with the context in which the warning is printed 101 + * @log_level: Kernel log level to print the message 102 + * @fmt: The text string to print 103 + * 104 + * Print the text contained in @fmt. The print rate is limited to bursts of 5 105 + * messages every two minutes. The purpose of this customized version of 106 + * printk() is to print messages when user space processes use any of the 107 + * UMIP-protected instructions. Thus, the printed text is prepended with the 108 + * task name and process ID number of the current task as well as the 109 + * instruction and stack pointers in @regs as seen when entering kernel mode. 110 + * 111 + * Returns: 112 + * 113 + * None. 114 + */ 115 + static __printf(3, 4) 116 + void umip_printk(const struct pt_regs *regs, const char *log_level, 117 + const char *fmt, ...) 
118 + { 119 + /* Bursts of 5 messages every two minutes */ 120 + static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5); 121 + struct task_struct *tsk = current; 122 + struct va_format vaf; 123 + va_list args; 124 + 125 + if (!__ratelimit(&ratelimit)) 126 + return; 127 + 128 + va_start(args, fmt); 129 + vaf.fmt = fmt; 130 + vaf.va = &args; 131 + printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm, 132 + task_pid_nr(tsk), regs->ip, regs->sp, &vaf); 133 + va_end(args); 134 + } 82 135 83 136 /** 84 137 * identify_insn() - Identify a UMIP-protected instruction ··· 171 118 default: 172 119 return -EINVAL; 173 120 } 121 + } else if (insn->opcode.bytes[1] == 0x0) { 122 + if (X86_MODRM_REG(insn->modrm.value) == 0) 123 + return UMIP_INST_SLDT; 124 + else if (X86_MODRM_REG(insn->modrm.value) == 1) 125 + return UMIP_INST_STR; 126 + else 127 + return -EINVAL; 128 + } else { 129 + return -EINVAL; 174 130 } 175 - 176 - /* SLDT AND STR are not emulated */ 177 - return -EINVAL; 178 131 } 179 132 180 133 /** ··· 287 228 if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))) 288 229 return; 289 230 290 - pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n", 291 - tsk->comm, task_pid_nr(tsk), regs->ip, 292 - regs->sp, X86_PF_USER | X86_PF_WRITE, 293 - regs->ip); 231 + umip_pr_err(regs, "segfault in emulation. error%x\n", 232 + X86_PF_USER | X86_PF_WRITE); 294 233 } 295 234 296 235 /** ··· 319 262 unsigned char buf[MAX_INSN_SIZE]; 320 263 void __user *uaddr; 321 264 struct insn insn; 322 - char seg_defs; 265 + int seg_defs; 323 266 324 267 if (!regs) 325 - return false; 326 - 327 - /* Do not emulate 64-bit processes. 
*/ 328 - if (user_64bit_mode(regs)) 329 268 return false; 330 269 331 270 /* ··· 374 321 umip_inst = identify_insn(&insn); 375 322 if (umip_inst < 0) 376 323 return false; 324 + 325 + umip_pr_warning(regs, "%s instruction cannot be used by applications.\n", 326 + umip_insns[umip_inst]); 327 + 328 + /* Do not emulate SLDT, STR or user long mode processes. */ 329 + if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs)) 330 + return false; 331 + 332 + umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n"); 377 333 378 334 if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size)) 379 335 return false;
+2 -2
arch/x86/lib/insn-eval.c
··· 733 733 * 734 734 * Returns: 735 735 * 736 - * A signed 8-bit value containing the default parameters on success. 736 + * An int containing ORed-in default parameters on success. 737 737 * 738 738 * -EINVAL on error. 739 739 */ 740 - char insn_get_code_seg_params(struct pt_regs *regs) 740 + int insn_get_code_seg_params(struct pt_regs *regs) 741 741 { 742 742 struct desc_struct *desc; 743 743 short sel;
+1 -1
arch/x86/lib/x86-opcode-map.txt
··· 896 896 897 897 GrpTable: Grp3_1 898 898 0: TEST Eb,Ib 899 - 1: 899 + 1: TEST Eb,Ib 900 900 2: NOT Eb 901 901 3: NEG Eb 902 902 4: MUL AL,Eb
+8 -3
arch/x86/mm/hugetlbpage.c
··· 158 158 if (len > TASK_SIZE) 159 159 return -ENOMEM; 160 160 161 + /* No address checking. See comment at mmap_address_hint_valid() */ 161 162 if (flags & MAP_FIXED) { 162 163 if (prepare_hugepage_range(file, addr, len)) 163 164 return -EINVAL; ··· 166 165 } 167 166 168 167 if (addr) { 169 - addr = ALIGN(addr, huge_page_size(h)); 168 + addr &= huge_page_mask(h); 169 + if (!mmap_address_hint_valid(addr, len)) 170 + goto get_unmapped_area; 171 + 170 172 vma = find_vma(mm, addr); 171 - if (TASK_SIZE - len >= addr && 172 - (!vma || addr + len <= vm_start_gap(vma))) 173 + if (!vma || addr + len <= vm_start_gap(vma)) 173 174 return addr; 174 175 } 176 + 177 + get_unmapped_area: 175 178 if (mm->get_unmapped_area == arch_get_unmapped_area) 176 179 return hugetlb_get_unmapped_area_bottomup(file, addr, len, 177 180 pgoff, flags);
+62
arch/x86/mm/mmap.c
··· 33 33 #include <linux/compat.h> 34 34 #include <asm/elf.h> 35 35 36 + #include "physaddr.h" 37 + 36 38 struct va_alignment __read_mostly va_align = { 37 39 .flags = -1, 38 40 }; ··· 175 173 if (vma->vm_flags & VM_MPX) 176 174 return "[mpx]"; 177 175 return NULL; 176 + } 177 + 178 + /** 179 + * mmap_address_hint_valid - Validate the address hint of mmap 180 + * @addr: Address hint 181 + * @len: Mapping length 182 + * 183 + * Check whether @addr and @addr + @len result in a valid mapping. 184 + * 185 + * On 32bit this only checks whether @addr + @len is <= TASK_SIZE. 186 + * 187 + * On 64bit with 5-level page tables another sanity check is required 188 + * because mappings requested by mmap(@addr, 0) which cross the 47-bit 189 + * virtual address boundary can cause the following theoretical issue: 190 + * 191 + * An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr 192 + * is below the border of the 47-bit address space and @addr + @len is 193 + * above the border. 194 + * 195 + * With 4-level paging this request succeeds, but the resulting mapping 196 + * address will always be within the 47-bit virtual address space, because 197 + * the hint address does not result in a valid mapping and is 198 + * ignored. Hence applications which are not prepared to handle virtual 199 + * addresses above 47-bit work correctly. 200 + * 201 + * With 5-level paging this request would be granted and result in a 202 + * mapping which crosses the border of the 47-bit virtual address 203 + * space. If the application cannot handle addresses above 47-bit this 204 + * will lead to misbehaviour and hard to diagnose failures. 205 + * 206 + * Therefore ignore address hints which would result in a mapping crossing 207 + * the 47-bit virtual address boundary. 208 + * 209 + * Note, that in the same scenario with MAP_FIXED the behaviour is 210 + * different. 
The request with @addr < 47-bit and @addr + @len > 47-bit 211 + * fails on a 4-level paging machine but succeeds on a 5-level paging 212 + * machine. It is reasonable to expect that an application does not rely on 213 + * the failure of such a fixed mapping request, so the restriction is not 214 + * applied. 215 + */ 216 + bool mmap_address_hint_valid(unsigned long addr, unsigned long len) 217 + { 218 + if (TASK_SIZE - len < addr) 219 + return false; 220 + 221 + return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW); 222 + } 223 + 224 + /* Can we access it for direct reading/writing? Must be RAM: */ 225 + int valid_phys_addr_range(phys_addr_t addr, size_t count) 226 + { 227 + return addr + count <= __pa(high_memory); 228 + } 229 + 230 + /* Can we access it through mmap? Must be a valid physical address: */ 231 + int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) 232 + { 233 + phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT; 234 + 235 + return phys_addr_valid(addr + count - 1); 178 236 }
+4
drivers/char/mem.c
··· 343 343 size_t size = vma->vm_end - vma->vm_start; 344 344 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; 345 345 346 + /* Does it even fit in phys_addr_t? */ 347 + if (offset >> PAGE_SHIFT != vma->vm_pgoff) 348 + return -EINVAL; 349 + 346 350 /* It's illegal to wrap around the end of the physical address space. */ 347 351 if (offset + (phys_addr_t)size - 1 < offset) 348 352 return -EINVAL;
-9
drivers/pci/Kconfig
··· 80 80 The PCI device frontend driver allows the kernel to import arbitrary 81 81 PCI devices from a PCI backend to support PCI driver domains. 82 82 83 - config HT_IRQ 84 - bool "Interrupts on hypertransport devices" 85 - default y 86 - depends on PCI && X86_LOCAL_APIC 87 - help 88 - This allows native hypertransport devices to use interrupts. 89 - 90 - If unsure say Y. 91 - 92 83 config PCI_ATS 93 84 bool 94 85
-3
drivers/pci/Makefile
··· 21 21 # Build the PCI MSI interrupt support 22 22 obj-$(CONFIG_PCI_MSI) += msi.o 23 23 24 - # Build the Hypertransport interrupt support 25 - obj-$(CONFIG_HT_IRQ) += htirq.o 26 - 27 24 obj-$(CONFIG_PCI_ATS) += ats.o 28 25 obj-$(CONFIG_PCI_IOV) += iov.o 29 26
-135
drivers/pci/htirq.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * File: htirq.c 4 - * Purpose: Hypertransport Interrupt Capability 5 - * 6 - * Copyright (C) 2006 Linux Networx 7 - * Copyright (C) Eric Biederman <ebiederman@lnxi.com> 8 - */ 9 - 10 - #include <linux/irq.h> 11 - #include <linux/pci.h> 12 - #include <linux/spinlock.h> 13 - #include <linux/export.h> 14 - #include <linux/slab.h> 15 - #include <linux/htirq.h> 16 - 17 - /* Global ht irq lock. 18 - * 19 - * This is needed to serialize access to the data port in hypertransport 20 - * irq capability. 21 - * 22 - * With multiple simultaneous hypertransport irq devices it might pay 23 - * to make this more fine grained. But start with simple, stupid, and correct. 24 - */ 25 - static DEFINE_SPINLOCK(ht_irq_lock); 26 - 27 - void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) 28 - { 29 - struct ht_irq_cfg *cfg = irq_get_handler_data(irq); 30 - unsigned long flags; 31 - 32 - spin_lock_irqsave(&ht_irq_lock, flags); 33 - if (cfg->msg.address_lo != msg->address_lo) { 34 - pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx); 35 - pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo); 36 - } 37 - if (cfg->msg.address_hi != msg->address_hi) { 38 - pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1); 39 - pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi); 40 - } 41 - if (cfg->update) 42 - cfg->update(cfg->dev, irq, msg); 43 - spin_unlock_irqrestore(&ht_irq_lock, flags); 44 - cfg->msg = *msg; 45 - } 46 - 47 - void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) 48 - { 49 - struct ht_irq_cfg *cfg = irq_get_handler_data(irq); 50 - 51 - *msg = cfg->msg; 52 - } 53 - 54 - void mask_ht_irq(struct irq_data *data) 55 - { 56 - struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); 57 - struct ht_irq_msg msg = cfg->msg; 58 - 59 - msg.address_lo |= 1; 60 - write_ht_irq_msg(data->irq, &msg); 61 - } 62 - 63 - void unmask_ht_irq(struct irq_data *data) 64 - { 65 - struct ht_irq_cfg 
*cfg = irq_data_get_irq_handler_data(data); 66 - struct ht_irq_msg msg = cfg->msg; 67 - 68 - msg.address_lo &= ~1; 69 - write_ht_irq_msg(data->irq, &msg); 70 - } 71 - 72 - /** 73 - * __ht_create_irq - create an irq and attach it to a device. 74 - * @dev: The hypertransport device to find the irq capability on. 75 - * @idx: Which of the possible irqs to attach to. 76 - * @update: Function to be called when changing the htirq message 77 - * 78 - * The irq number of the new irq or a negative error value is returned. 79 - */ 80 - int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) 81 - { 82 - int max_irq, pos, irq; 83 - unsigned long flags; 84 - u32 data; 85 - 86 - pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); 87 - if (!pos) 88 - return -EINVAL; 89 - 90 - /* Verify the idx I want to use is in range */ 91 - spin_lock_irqsave(&ht_irq_lock, flags); 92 - pci_write_config_byte(dev, pos + 2, 1); 93 - pci_read_config_dword(dev, pos + 4, &data); 94 - spin_unlock_irqrestore(&ht_irq_lock, flags); 95 - 96 - max_irq = (data >> 16) & 0xff; 97 - if (idx > max_irq) 98 - return -EINVAL; 99 - 100 - irq = arch_setup_ht_irq(idx, pos, dev, update); 101 - if (irq > 0) 102 - dev_dbg(&dev->dev, "irq %d for HT\n", irq); 103 - 104 - return irq; 105 - } 106 - EXPORT_SYMBOL(__ht_create_irq); 107 - 108 - /** 109 - * ht_create_irq - create an irq and attach it to a device. 110 - * @dev: The hypertransport device to find the irq capability on. 111 - * @idx: Which of the possible irqs to attach to. 112 - * 113 - * ht_create_irq needs to be called for all hypertransport devices 114 - * that generate irqs. 115 - * 116 - * The irq number of the new irq or a negative error value is returned. 
117 - */ 118 - int ht_create_irq(struct pci_dev *dev, int idx) 119 - { 120 - return __ht_create_irq(dev, idx, NULL); 121 - } 122 - EXPORT_SYMBOL(ht_create_irq); 123 - 124 - /** 125 - * ht_destroy_irq - destroy an irq created with ht_create_irq 126 - * @irq: irq to be destroyed 127 - * 128 - * This reverses ht_create_irq removing the specified irq from 129 - * existence. The irq should be free before this happens. 130 - */ 131 - void ht_destroy_irq(unsigned int irq) 132 - { 133 - arch_teardown_ht_irq(irq); 134 - } 135 - EXPORT_SYMBOL(ht_destroy_irq);
-39
include/linux/htirq.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef LINUX_HTIRQ_H 3 - #define LINUX_HTIRQ_H 4 - 5 - struct pci_dev; 6 - struct irq_data; 7 - 8 - struct ht_irq_msg { 9 - u32 address_lo; /* low 32 bits of the ht irq message */ 10 - u32 address_hi; /* high 32 bits of the it irq message */ 11 - }; 12 - 13 - typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq, 14 - struct ht_irq_msg *msg); 15 - 16 - struct ht_irq_cfg { 17 - struct pci_dev *dev; 18 - /* Update callback used to cope with buggy hardware */ 19 - ht_irq_update_t *update; 20 - unsigned pos; 21 - unsigned idx; 22 - struct ht_irq_msg msg; 23 - }; 24 - 25 - /* Helper functions.. */ 26 - void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 27 - void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 28 - void mask_ht_irq(struct irq_data *data); 29 - void unmask_ht_irq(struct irq_data *data); 30 - 31 - /* The arch hook for getting things started */ 32 - int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev, 33 - ht_irq_update_t *update); 34 - void arch_teardown_ht_irq(unsigned int irq); 35 - 36 - /* For drivers of buggy hardware */ 37 - int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); 38 - 39 - #endif /* LINUX_HTIRQ_H */
-6
include/linux/pci.h
··· 1485 1485 static inline void pcie_ecrc_get_policy(char *str) { } 1486 1486 #endif 1487 1487 1488 - #ifdef CONFIG_HT_IRQ 1489 - /* The functions a driver should call */ 1490 - int ht_create_irq(struct pci_dev *dev, int idx); 1491 - void ht_destroy_irq(unsigned int irq); 1492 - #endif /* CONFIG_HT_IRQ */ 1493 - 1494 1488 #ifdef CONFIG_PCI_ATS 1495 1489 /* Address Translation Service */ 1496 1490 void pci_ats_init(struct pci_dev *dev);
+177
tools/testing/selftests/x86/5lvl.c
··· 1 + #include <stdio.h> 2 + #include <sys/mman.h> 3 + 4 + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) 5 + 6 + #define PAGE_SIZE 4096 7 + #define LOW_ADDR ((void *) (1UL << 30)) 8 + #define HIGH_ADDR ((void *) (1UL << 50)) 9 + 10 + struct testcase { 11 + void *addr; 12 + unsigned long size; 13 + unsigned long flags; 14 + const char *msg; 15 + unsigned int low_addr_required:1; 16 + unsigned int keep_mapped:1; 17 + }; 18 + 19 + static struct testcase testcases[] = { 20 + { 21 + .addr = NULL, 22 + .size = 2 * PAGE_SIZE, 23 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 24 + .msg = "mmap(NULL)", 25 + .low_addr_required = 1, 26 + }, 27 + { 28 + .addr = LOW_ADDR, 29 + .size = 2 * PAGE_SIZE, 30 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 31 + .msg = "mmap(LOW_ADDR)", 32 + .low_addr_required = 1, 33 + }, 34 + { 35 + .addr = HIGH_ADDR, 36 + .size = 2 * PAGE_SIZE, 37 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 38 + .msg = "mmap(HIGH_ADDR)", 39 + .keep_mapped = 1, 40 + }, 41 + { 42 + .addr = HIGH_ADDR, 43 + .size = 2 * PAGE_SIZE, 44 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 45 + .msg = "mmap(HIGH_ADDR) again", 46 + .keep_mapped = 1, 47 + }, 48 + { 49 + .addr = HIGH_ADDR, 50 + .size = 2 * PAGE_SIZE, 51 + .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 52 + .msg = "mmap(HIGH_ADDR, MAP_FIXED)", 53 + }, 54 + { 55 + .addr = (void*) -1, 56 + .size = 2 * PAGE_SIZE, 57 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 58 + .msg = "mmap(-1)", 59 + .keep_mapped = 1, 60 + }, 61 + { 62 + .addr = (void*) -1, 63 + .size = 2 * PAGE_SIZE, 64 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 65 + .msg = "mmap(-1) again", 66 + }, 67 + { 68 + .addr = (void *)((1UL << 47) - PAGE_SIZE), 69 + .size = 2 * PAGE_SIZE, 70 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 71 + .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)", 72 + .low_addr_required = 1, 73 + .keep_mapped = 1, 74 + }, 75 + { 76 + .addr = (void *)((1UL << 47) - PAGE_SIZE / 2), 77 + .size = 2 * PAGE_SIZE, 78 + .flags = MAP_PRIVATE | MAP_ANONYMOUS, 79 + .msg = "mmap((1UL << 
47), 2 * PAGE_SIZE / 2)", 80 + .low_addr_required = 1, 81 + .keep_mapped = 1, 82 + }, 83 + { 84 + .addr = (void *)((1UL << 47) - PAGE_SIZE), 85 + .size = 2 * PAGE_SIZE, 86 + .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 87 + .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)", 88 + }, 89 + { 90 + .addr = NULL, 91 + .size = 2UL << 20, 92 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 93 + .msg = "mmap(NULL, MAP_HUGETLB)", 94 + .low_addr_required = 1, 95 + }, 96 + { 97 + .addr = LOW_ADDR, 98 + .size = 2UL << 20, 99 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 100 + .msg = "mmap(LOW_ADDR, MAP_HUGETLB)", 101 + .low_addr_required = 1, 102 + }, 103 + { 104 + .addr = HIGH_ADDR, 105 + .size = 2UL << 20, 106 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 107 + .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)", 108 + .keep_mapped = 1, 109 + }, 110 + { 111 + .addr = HIGH_ADDR, 112 + .size = 2UL << 20, 113 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 114 + .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again", 115 + .keep_mapped = 1, 116 + }, 117 + { 118 + .addr = HIGH_ADDR, 119 + .size = 2UL << 20, 120 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 121 + .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)", 122 + }, 123 + { 124 + .addr = (void*) -1, 125 + .size = 2UL << 20, 126 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 127 + .msg = "mmap(-1, MAP_HUGETLB)", 128 + .keep_mapped = 1, 129 + }, 130 + { 131 + .addr = (void*) -1, 132 + .size = 2UL << 20, 133 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 134 + .msg = "mmap(-1, MAP_HUGETLB) again", 135 + }, 136 + { 137 + .addr = (void *)((1UL << 47) - PAGE_SIZE), 138 + .size = 4UL << 20, 139 + .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS, 140 + .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)", 141 + .low_addr_required = 1, 142 + .keep_mapped = 1, 143 + }, 144 + { 145 + .addr = (void *)((1UL << 47) - (2UL << 20)), 146 + .size = 4UL << 20, 147 + .flags = MAP_HUGETLB | 
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 148 + .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)", 149 + }, 150 + }; 151 + 152 + int main(int argc, char **argv) 153 + { 154 + int i; 155 + void *p; 156 + 157 + for (i = 0; i < ARRAY_SIZE(testcases); i++) { 158 + struct testcase *t = testcases + i; 159 + 160 + p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0); 161 + 162 + printf("%s: %p - ", t->msg, p); 163 + 164 + if (p == MAP_FAILED) { 165 + printf("FAILED\n"); 166 + continue; 167 + } 168 + 169 + if (t->low_addr_required && p >= (void *)(1UL << 47)) 170 + printf("FAILED\n"); 171 + else 172 + printf("OK\n"); 173 + if (!t->keep_mapped) 174 + munmap(p, t->size); 175 + } 176 + return 0; 177 + }
+1 -1
tools/testing/selftests/x86/Makefile
··· 11 11 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ 12 12 test_FCMOV test_FCOMI test_FISTTP \ 13 13 vdso_restorer 14 - TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 14 + TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl 15 15 16 16 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) 17 17 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
+2 -2
tools/testing/selftests/x86/mpx-hw.h
··· 52 52 struct mpx_bd_entry { 53 53 union { 54 54 char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES]; 55 - void *contents[1]; 55 + void *contents[0]; 56 56 }; 57 57 } __attribute__((packed)); 58 58 59 59 struct mpx_bt_entry { 60 60 union { 61 61 char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES]; 62 - unsigned long contents[1]; 62 + unsigned long contents[0]; 63 63 }; 64 64 } __attribute__((packed)); 65 65
+4 -1
tools/testing/selftests/x86/pkey-helpers.h
··· 30 30 if (!dprint_in_signal) { 31 31 vprintf(format, ap); 32 32 } else { 33 + int ret; 33 34 int len = vsnprintf(dprint_in_signal_buffer, 34 35 DPRINT_IN_SIGNAL_BUF_SIZE, 35 36 format, ap); ··· 40 39 */ 41 40 if (len > DPRINT_IN_SIGNAL_BUF_SIZE) 42 41 len = DPRINT_IN_SIGNAL_BUF_SIZE; 43 - write(1, dprint_in_signal_buffer, len); 42 + ret = write(1, dprint_in_signal_buffer, len); 43 + if (ret < 0) 44 + abort(); 44 45 } 45 46 va_end(ap); 46 47 }
+5 -5
tools/testing/selftests/x86/protection_keys.c
··· 250 250 unsigned long ip; 251 251 char *fpregs; 252 252 u32 *pkru_ptr; 253 - u64 si_pkey; 253 + u64 siginfo_pkey; 254 254 u32 *si_pkey_ptr; 255 255 int pkru_offset; 256 256 fpregset_t fpregset; ··· 292 292 si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); 293 293 dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); 294 294 dump_mem(si_pkey_ptr - 8, 24); 295 - si_pkey = *si_pkey_ptr; 296 - pkey_assert(si_pkey < NR_PKEYS); 297 - last_si_pkey = si_pkey; 295 + siginfo_pkey = *si_pkey_ptr; 296 + pkey_assert(siginfo_pkey < NR_PKEYS); 297 + last_si_pkey = siginfo_pkey; 298 298 299 299 if ((si->si_code == SEGV_MAPERR) || 300 300 (si->si_code == SEGV_ACCERR) || ··· 306 306 dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); 307 307 /* need __rdpkru() version so we do not do shadow_pkru checking */ 308 308 dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); 309 - dprintf1("si_pkey from siginfo: %jx\n", si_pkey); 309 + dprintf1("pkey from siginfo: %jx\n", siginfo_pkey); 310 310 *(u64 *)pkru_ptr = 0x00000000; 311 311 dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); 312 312 pkru_faults++;