Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'powerpc-5.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Add perf support for emitting extended registers for power10.

- A fix for CPU hotplug on pseries, where on large/loaded systems we
may not wait long enough for the CPU to be offlined, leading to
crashes.

- Addition of a raw cputable entry for Power10, which is not required
to boot, but is required to make our PMU setup work correctly in
guests.

- Three fixes for the recent changes on 32-bit Book3S to move modules
into their own segment for strict RWX.

- A fix for a recent change in our powernv PCI code that could lead to
crashes.

- A change to our perf interrupt accounting to avoid soft lockups when
using some events, found by syzkaller.

- A change in the way we handle power loss events from the hypervisor
on pseries. We no longer immediately shut down if we're told we're
running on a UPS.

- A few other minor fixes.

Thanks to Alexey Kardashevskiy, Andreas Schwab, Aneesh Kumar K.V, Anju T
Sudhakar, Athira Rajeev, Christophe Leroy, Frederic Barrat, Greg Kurz,
Kajol Jain, Madhavan Srinivasan, Michael Neuling, Michael Roth,
Nageswara R Sastry, Oliver O'Halloran, Thiago Jung Bauermann,
Vaidyanathan Srinivasan, Vasant Hegde.

* tag 'powerpc-5.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/perf/hv-24x7: Move cpumask file to top folder of hv-24x7 driver
powerpc/32s: Fix module loading failure when VMALLOC_END is over 0xf0000000
powerpc/pseries: Do not initiate shutdown when system is running on UPS
powerpc/perf: Fix soft lockups due to missed interrupt accounting
powerpc/powernv/pci: Fix possible crash when releasing DMA resources
powerpc/pseries/hotplug-cpu: wait indefinitely for vCPU death
powerpc/32s: Fix is_module_segment() when MODULES_VADDR is defined
powerpc/kasan: Fix KASAN_SHADOW_START on BOOK3S_32
powerpc/fixmap: Fix the size of the early debug area
powerpc/pkeys: Fix build error with PPC_MEM_KEYS disabled
powerpc/kernel: Cleanup machine check function declarations
powerpc: Add POWER10 raw mode cputable entry
powerpc/perf: Add extended regs support for power10 platform
powerpc/perf: Add support for outputting extended regs in perf intr_regs
powerpc: Fix P10 PVR revision in /proc/cpuinfo for SMT4 cores

+161 -25
+1 -1
Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
··· 43 43 This sysfs interface exposes the number of cores per chip 44 44 present in the system. 45 45 46 - What: /sys/devices/hv_24x7/interface/cpumask 46 + What: /sys/devices/hv_24x7/cpumask 47 47 Date: July 2020 48 48 Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> 49 49 Description: read only
+5
arch/powerpc/include/asm/cputable.h
··· 9 9 10 10 #ifndef __ASSEMBLY__ 11 11 12 + /* 13 + * Added to include __machine_check_early_realmode_* functions 14 + */ 15 + #include <asm/mce.h> 16 + 12 17 /* This structure can grow, it's real size is used by head.S code 13 18 * via the mkdefs mechanism. 14 19 */
+1 -1
arch/powerpc/include/asm/fixmap.h
··· 52 52 FIX_HOLE, 53 53 /* reserve the top 128K for early debugging purposes */ 54 54 FIX_EARLY_DEBUG_TOP = FIX_HOLE, 55 - FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1, 55 + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, 56 56 #ifdef CONFIG_HIGHMEM 57 57 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 58 58 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+8 -1
arch/powerpc/include/asm/kasan.h
··· 15 15 #ifndef __ASSEMBLY__ 16 16 17 17 #include <asm/page.h> 18 + #include <linux/sizes.h> 18 19 19 20 #define KASAN_SHADOW_SCALE_SHIFT 3 20 21 22 + #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX) 23 + #define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M) 24 + #else 25 + #define KASAN_KERN_START PAGE_OFFSET 26 + #endif 27 + 21 28 #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \ 22 - (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT)) 29 + (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT)) 23 30 24 31 #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) 25 32
+7
arch/powerpc/include/asm/mce.h
··· 210 210 #define MCE_EVENT_RELEASE true 211 211 #define MCE_EVENT_DONTRELEASE false 212 212 213 + struct pt_regs; 214 + struct notifier_block; 215 + 213 216 extern void save_mce_event(struct pt_regs *regs, long handled, 214 217 struct mce_error_info *mce_err, uint64_t nip, 215 218 uint64_t addr, uint64_t phys_addr); ··· 228 225 int mce_unregister_notifier(struct notifier_block *nb); 229 226 #ifdef CONFIG_PPC_BOOK3S_64 230 227 void flush_and_reload_slb(void); 228 + long __machine_check_early_realmode_p7(struct pt_regs *regs); 229 + long __machine_check_early_realmode_p8(struct pt_regs *regs); 230 + long __machine_check_early_realmode_p9(struct pt_regs *regs); 231 + long __machine_check_early_realmode_p10(struct pt_regs *regs); 231 232 #endif /* CONFIG_PPC_BOOK3S_64 */ 232 233 #endif /* __ASM_PPC64_MCE_H__ */
+3
arch/powerpc/include/asm/perf_event.h
··· 40 40 41 41 /* To support perf_regs sier update */ 42 42 extern bool is_sier_available(void); 43 + /* To define perf extended regs mask value */ 44 + extern u64 PERF_REG_EXTENDED_MASK; 45 + #define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK 43 46 #endif
+5
arch/powerpc/include/asm/perf_event_server.h
··· 62 62 int *blacklist_ev; 63 63 /* BHRB entries in the PMU */ 64 64 int bhrb_nr; 65 + /* 66 + * set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if 67 + * the pmu supports extended perf regs capability 68 + */ 69 + int capabilities; 65 70 }; 66 71 67 72 /*
+19 -1
arch/powerpc/include/uapi/asm/perf_regs.h
··· 48 48 PERF_REG_POWERPC_DSISR, 49 49 PERF_REG_POWERPC_SIER, 50 50 PERF_REG_POWERPC_MMCRA, 51 - PERF_REG_POWERPC_MAX, 51 + /* Extended registers */ 52 + PERF_REG_POWERPC_MMCR0, 53 + PERF_REG_POWERPC_MMCR1, 54 + PERF_REG_POWERPC_MMCR2, 55 + PERF_REG_POWERPC_MMCR3, 56 + PERF_REG_POWERPC_SIER2, 57 + PERF_REG_POWERPC_SIER3, 58 + /* Max regs without the extended regs */ 59 + PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1, 52 60 }; 61 + 62 + #define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1) 63 + 64 + /* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */ 65 + #define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK) 66 + /* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */ 67 + #define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK) 68 + 69 + #define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1) 70 + #define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1) 53 71 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
+19 -3
arch/powerpc/kernel/cputable.c
··· 72 72 extern void __restore_cpu_power9(void); 73 73 extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); 74 74 extern void __restore_cpu_power10(void); 75 - extern long __machine_check_early_realmode_p7(struct pt_regs *regs); 76 - extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 77 - extern long __machine_check_early_realmode_p9(struct pt_regs *regs); 78 75 #endif /* CONFIG_PPC64 */ 79 76 #if defined(CONFIG_E500) 80 77 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); ··· 538 541 .cpu_restore = __restore_cpu_power9, 539 542 .machine_check_early = __machine_check_early_realmode_p9, 540 543 .platform = "power9", 544 + }, 545 + { /* Power10 */ 546 + .pvr_mask = 0xffff0000, 547 + .pvr_value = 0x00800000, 548 + .cpu_name = "POWER10 (raw)", 549 + .cpu_features = CPU_FTRS_POWER10, 550 + .cpu_user_features = COMMON_USER_POWER10, 551 + .cpu_user_features2 = COMMON_USER2_POWER10, 552 + .mmu_features = MMU_FTRS_POWER10, 553 + .icache_bsize = 128, 554 + .dcache_bsize = 128, 555 + .num_pmcs = 6, 556 + .pmc_type = PPC_PMC_IBM, 557 + .oprofile_cpu_type = "ppc64/power10", 558 + .oprofile_type = PPC_OPROFILE_INVALID, 559 + .cpu_setup = __setup_cpu_power10, 560 + .cpu_restore = __restore_cpu_power10, 561 + .machine_check_early = __machine_check_early_realmode_p10, 562 + .platform = "power10", 541 563 }, 542 564 { /* Cell Broadband Engine */ 543 565 .pvr_mask = 0xffff0000,
-4
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 64 64 * Set up the base CPU 65 65 */ 66 66 67 - extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 68 - extern long __machine_check_early_realmode_p9(struct pt_regs *regs); 69 - extern long __machine_check_early_realmode_p10(struct pt_regs *regs); 70 - 71 67 static int hv_mode; 72 68 73 69 static struct {
+1
arch/powerpc/kernel/setup-common.c
··· 311 311 min = pvr & 0xFF; 312 312 break; 313 313 case 0x004e: /* POWER9 bits 12-15 give chip type */ 314 + case 0x0080: /* POWER10 bit 12 gives SMT8/4 */ 314 315 maj = (pvr >> 8) & 0x0F; 315 316 min = pvr & 0xFF; 316 317 break;
+8 -1
arch/powerpc/mm/book3s32/mmu.c
··· 191 191 { 192 192 if (!IS_ENABLED(CONFIG_MODULES)) 193 193 return false; 194 + #ifdef MODULES_VADDR 195 + if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) 196 + return false; 197 + if (addr > ALIGN(MODULES_END, SZ_256M) - 1) 198 + return false; 199 + #else 194 200 if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M)) 195 201 return false; 196 - if (addr >= ALIGN(VMALLOC_END, SZ_256M)) 202 + if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1) 197 203 return false; 204 + #endif 198 205 return true; 199 206 } 200 207
+3 -1
arch/powerpc/mm/book3s64/hash_utils.c
··· 1115 1115 && cpu_has_feature(CPU_FTR_HVMODE)) 1116 1116 tlbiel_all(); 1117 1117 1118 - if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY)) 1118 + #ifdef CONFIG_PPC_MEM_KEYS 1119 + if (mmu_has_feature(MMU_FTR_PKEY)) 1119 1120 mtspr(SPRN_UAMOR, default_uamor); 1121 + #endif 1120 1122 } 1121 1123 #endif /* CONFIG_SMP */ 1122 1124
+5
arch/powerpc/perf/core-book3s.c
··· 2141 2141 2142 2142 if (perf_event_overflow(event, &data, regs)) 2143 2143 power_pmu_stop(event, 0); 2144 + } else if (period) { 2145 + /* Account for interrupt in case of invalid SIAR */ 2146 + if (perf_event_account_interrupt(event)) 2147 + power_pmu_stop(event, 0); 2144 2148 } 2145 2149 } 2146 2150 ··· 2327 2323 pmu->name); 2328 2324 2329 2325 power_pmu.attr_groups = ppmu->attr_groups; 2326 + power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS); 2330 2327 2331 2328 #ifdef MSR_HV 2332 2329 /*
+10 -1
arch/powerpc/perf/hv-24x7.c
··· 1128 1128 NULL, 1129 1129 }; 1130 1130 1131 + static struct attribute *cpumask_attrs[] = { 1132 + &dev_attr_cpumask.attr, 1133 + NULL, 1134 + }; 1135 + 1136 + static struct attribute_group cpumask_attr_group = { 1137 + .attrs = cpumask_attrs, 1138 + }; 1139 + 1131 1140 static struct attribute *if_attrs[] = { 1132 1141 &dev_attr_catalog_len.attr, 1133 1142 &dev_attr_catalog_version.attr, ··· 1144 1135 &dev_attr_sockets.attr, 1145 1136 &dev_attr_chipspersocket.attr, 1146 1137 &dev_attr_coresperchip.attr, 1147 - &dev_attr_cpumask.attr, 1148 1138 NULL, 1149 1139 }; 1150 1140 ··· 1159 1151 &event_desc_group, 1160 1152 &event_long_desc_group, 1161 1153 &if_group, 1154 + &cpumask_attr_group, 1162 1155 NULL, 1163 1156 }; 1164 1157
+41 -3
arch/powerpc/perf/perf_regs.c
··· 13 13 #include <asm/ptrace.h> 14 14 #include <asm/perf_regs.h> 15 15 16 + u64 PERF_REG_EXTENDED_MASK; 17 + 16 18 #define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r) 17 19 18 - #define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1)) 20 + #define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK)) 19 21 20 22 static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { 21 23 PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]), ··· 71 69 PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), 72 70 }; 73 71 72 + /* Function to return the extended register values */ 73 + static u64 get_ext_regs_value(int idx) 74 + { 75 + switch (idx) { 76 + case PERF_REG_POWERPC_MMCR0: 77 + return mfspr(SPRN_MMCR0); 78 + case PERF_REG_POWERPC_MMCR1: 79 + return mfspr(SPRN_MMCR1); 80 + case PERF_REG_POWERPC_MMCR2: 81 + return mfspr(SPRN_MMCR2); 82 + #ifdef CONFIG_PPC64 83 + case PERF_REG_POWERPC_MMCR3: 84 + return mfspr(SPRN_MMCR3); 85 + case PERF_REG_POWERPC_SIER2: 86 + return mfspr(SPRN_SIER2); 87 + case PERF_REG_POWERPC_SIER3: 88 + return mfspr(SPRN_SIER3); 89 + #endif 90 + default: return 0; 91 + } 92 + } 93 + 74 94 u64 perf_reg_value(struct pt_regs *regs, int idx) 75 95 { 76 - if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX)) 77 - return 0; 96 + u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX; 97 + 98 + if (cpu_has_feature(CPU_FTR_ARCH_31)) 99 + perf_reg_extended_max = PERF_REG_MAX_ISA_31; 100 + else if (cpu_has_feature(CPU_FTR_ARCH_300)) 101 + perf_reg_extended_max = PERF_REG_MAX_ISA_300; 78 102 79 103 if (idx == PERF_REG_POWERPC_SIER && 80 104 (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || ··· 111 83 if (idx == PERF_REG_POWERPC_MMCRA && 112 84 (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || 113 85 IS_ENABLED(CONFIG_PPC32))) 86 + return 0; 87 + 88 + if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max) 89 + return get_ext_regs_value(idx); 90 + 91 + /* 92 + * If the idx is referring to value beyond the 93 + * supported registers, return 0 with a warning 94 + */ 95 + if (WARN_ON_ONCE(idx >= perf_reg_extended_max)) 114 96 return 0; 115 97 116 98 return regs_get_register(regs, pt_regs_offset[idx]);
+6
arch/powerpc/perf/power10-pmu.c
··· 87 87 #define POWER10_MMCRA_IFM3 0x00000000C0000000UL 88 88 #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL 89 89 90 + extern u64 PERF_REG_EXTENDED_MASK; 91 + 90 92 /* Table of alternatives, sorted by column 0 */ 91 93 static const unsigned int power10_event_alternatives[][MAX_ALT] = { 92 94 { PM_RUN_CYC_ALT, PM_RUN_CYC }, ··· 399 397 .cache_events = &power10_cache_events, 400 398 .attr_groups = power10_pmu_attr_groups, 401 399 .bhrb_nr = 32, 400 + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, 402 401 }; 403 402 404 403 int init_power10_pmu(void) ··· 410 407 if (!cur_cpu_spec->oprofile_cpu_type || 411 408 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10")) 412 409 return -ENODEV; 410 + 411 + /* Set the PERF_REG_EXTENDED_MASK here */ 412 + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31; 413 413 414 414 rc = register_power_pmu(&power10_pmu); 415 415 if (rc)
+6
arch/powerpc/perf/power9-pmu.c
··· 90 90 #define POWER9_MMCRA_IFM3 0x00000000C0000000UL 91 91 #define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL 92 92 93 + extern u64 PERF_REG_EXTENDED_MASK; 94 + 93 95 /* Nasty Power9 specific hack */ 94 96 #define PVR_POWER9_CUMULUS 0x00002000 95 97 ··· 436 434 .cache_events = &power9_cache_events, 437 435 .attr_groups = power9_pmu_attr_groups, 438 436 .bhrb_nr = 32, 437 + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, 439 438 }; 440 439 441 440 int init_power9_pmu(void) ··· 459 456 power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd22_bl_ev); 460 457 } 461 458 } 459 + 460 + /* Set the PERF_REG_EXTENDED_MASK here */ 461 + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300; 462 462 463 463 rc = register_power_pmu(&power9_pmu); 464 464 if (rc)
+1 -1
arch/powerpc/platforms/powernv/pci-ioda.c
··· 2705 2705 struct iommu_table *tbl = pe->table_group.tables[0]; 2706 2706 int64_t rc; 2707 2707 2708 - if (pe->dma_setup_done) 2708 + if (!pe->dma_setup_done) 2709 2709 return; 2710 2710 2711 2711 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+12 -6
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 107 107 */ 108 108 static void pseries_cpu_die(unsigned int cpu) 109 109 { 110 - int tries; 111 110 int cpu_status = 1; 112 111 unsigned int pcpu = get_hard_smp_processor_id(cpu); 112 + unsigned long timeout = jiffies + msecs_to_jiffies(120000); 113 113 114 - for (tries = 0; tries < 25; tries++) { 114 + while (true) { 115 115 cpu_status = smp_query_cpu_stopped(pcpu); 116 116 if (cpu_status == QCSS_STOPPED || 117 117 cpu_status == QCSS_HARDWARE_ERROR) 118 118 break; 119 - cpu_relax(); 120 119 120 + if (time_after(jiffies, timeout)) { 121 + pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", 122 + cpu, pcpu); 123 + timeout = jiffies + msecs_to_jiffies(120000); 124 + } 125 + 126 + cond_resched(); 121 127 } 122 128 123 - if (cpu_status != 0) { 124 - printk("Querying DEAD? cpu %i (%i) shows %i\n", 125 - cpu, pcpu, cpu_status); 129 + if (cpu_status == QCSS_HARDWARE_ERROR) { 130 + pr_warn("CPU %i (hwid %i) reported error while dying\n", 131 + cpu, pcpu); 126 132 } 127 133 128 134 /* Isolation and deallocation are definitely done by
-1
arch/powerpc/platforms/pseries/ras.c
··· 184 184 case EPOW_SHUTDOWN_ON_UPS: 185 185 pr_emerg("Loss of system power detected. System is running on" 186 186 " UPS/battery. Check RTAS error log for details\n"); 187 - orderly_poweroff(true); 188 187 break; 189 188 190 189 case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: