Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'powerpc-5.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix crashes coming out of nap on 32-bit Book3s (e.g. PowerBooks).

- Fix critical and debug interrupts on BookE, seen as crashes when
using ptrace.

- Fix an oops when running an SMP kernel on a UP system.

- Update pseries LPAR security flavor after partition migration.

- Fix an oops when using kprobes on BookE.

- Fix oops on 32-bit pmac by not calling do_IRQ() from
timer_interrupt().

- Fix softlockups on CPU hotplug into a CPU-less node with xive (P9).

Thanks to Cédric Le Goater, Christophe Leroy, Finn Thain, Geetika
Moolchandani, Laurent Dufour, Laurent Vivier, Nicholas Piggin, Pu Lehui,
Radu Rendec, Srikar Dronamraju, and Stan Johnson.

* tag 'powerpc-5.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/xive: Do not skip CPU-less nodes when creating the IPIs
powerpc/interrupt: Do not call single_step_exception() from other exceptions
powerpc/interrupt: Fix OOPS by not calling do_IRQ() from timer_interrupt()
powerpc/kprobes: Fix kprobe Oops happens in booke
powerpc/pseries: Fix update of LPAR security flavor after LPM
powerpc/smp: Fix OOPS in topology_init()
powerpc/32: Fix critical and debug interrupts on BOOKE
powerpc/32s: Fix napping restore in data storage interrupt (DSI)

13 files changed, 82 insertions(+), 62 deletions(-)

arch/powerpc/include/asm/interrupt.h (+3)

 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);

 void replay_system_reset(void);

arch/powerpc/include/asm/irq.h (+1 -1)

 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];

-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);

arch/powerpc/include/asm/ptrace.h (+16)

                 unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
         };
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+        struct { /* Must be a multiple of 16 bytes */
+                unsigned long mas0;
+                unsigned long mas1;
+                unsigned long mas2;
+                unsigned long mas3;
+                unsigned long mas6;
+                unsigned long mas7;
+                unsigned long srr0;
+                unsigned long srr1;
+                unsigned long csrr0;
+                unsigned long csrr1;
+                unsigned long dsrr0;
+                unsigned long dsrr1;
+        };
+#endif
 };
 #endif

arch/powerpc/kernel/asm-offsets.c (+14 -17)

         STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif

-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-        DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-        DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+        STACK_PT_REGS_OFFSET(MAS0, mas0);
         /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-        DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-        DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-        DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-        DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-        DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-        DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-        DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-        DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-        DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-        DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-        DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-        DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+        STACK_PT_REGS_OFFSET(MMUCR, mas0);
+        STACK_PT_REGS_OFFSET(MAS1, mas1);
+        STACK_PT_REGS_OFFSET(MAS2, mas2);
+        STACK_PT_REGS_OFFSET(MAS3, mas3);
+        STACK_PT_REGS_OFFSET(MAS6, mas6);
+        STACK_PT_REGS_OFFSET(MAS7, mas7);
+        STACK_PT_REGS_OFFSET(_SRR0, srr0);
+        STACK_PT_REGS_OFFSET(_SRR1, srr1);
+        STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+        STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+        STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+        STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif

         /* About the CPU features table */
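
For context: asm-offsets.c is never linked into the kernel; it is only compiled to assembly, and each DEFINE() emits a marker string that a build script turns into a #define usable from .S files. STACK_PT_REGS_OFFSET is taken here to be a DEFINE() wrapper that adds the stack offset of struct pt_regs, which is why moving the MAS/SRR save area into pt_regs (previous file) lets all of these entries collapse onto a plain offsetof(). A minimal userspace sketch of the trick, with a hypothetical struct layout (this is not the kernel's build system):

#include <stddef.h>

struct pt_regs {
        unsigned long gpr[32];
        unsigned long mas0;     /* stand-in: now a pt_regs member on 32-bit BookE */
};

/* Emits the literal text "->MAS0 <value>" into the generated .s file;
 * the kernel's Kbuild greps these markers and rewrites them as
 * "#define MAS0 <value>" in asm-offsets.h for use from assembly. */
#define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

int main(void)
{
        DEFINE(MAS0, offsetof(struct pt_regs, mas0));
        return 0;       /* compile with -S and inspect the .s output */
}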

arch/powerpc/kernel/head_book3s_32.S (+1 -1)

         EXCEPTION_PROLOG_1
         EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
         prepare_transfer_to_handler
-        lwz r5, _DSISR(r11)
+        lwz r5, _DSISR(r1)
         andis. r0, r5, DSISR_DABRMATCH@h
         bne- 1f
         bl do_page_fault

arch/powerpc/kernel/head_booke.h (+3 -24)

 /* only on e500mc */
 #define DBG_STACK_BASE dbgirq_ctx

-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
         mfspr r8,SPRN_PIR; \
         slwi r8,r8,2; \
         addis r8,r8,level##_STACK_BASE@ha; \
         lwz r8,level##_STACK_BASE@l(r8); \
-        addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+        addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
         lis r8,level##_STACK_BASE@ha; \
         lwz r8,level##_STACK_BASE@l(r8); \
-        addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+        addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
···
         mtmsr r11; \
         mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
         lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-        addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
+        addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
         beq 1f; \
         /* COMING FROM USER MODE */ \
         stw r9,_CCR(r11); /* save CR */\
···
 1:      prepare_transfer_to_handler; \
         bl kernel_fp_unavailable_exception; \
         b interrupt_return
-
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-        unsigned long mas0;
-        unsigned long mas1;
-        unsigned long mas2;
-        unsigned long mas3;
-        unsigned long mas6;
-        unsigned long mas7;
-        unsigned long srr0;
-        unsigned long srr1;
-        unsigned long csrr0;
-        unsigned long csrr1;
-        unsigned long dsrr0;
-        unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)

 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */

arch/powerpc/kernel/irq.c (+6 -1)

         trace_irq_exit(regs);
 }

-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
         void *cursp, *irqsp, *sirqsp;
···
         call_do_irq(regs, irqsp);

         set_irq_regs(old_regs);
+}
+
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+        __do_IRQ(regs);
 }

 static void *__init alloc_vm_stack(void)
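
The shape of this change recurs throughout the series: the handler body moves into a raw __do_IRQ(), and the DEFINE_INTERRUPT_HANDLER_ASYNC() wrapper is reduced to a call into it, so code that is already running inside an interrupt handler (timer_interrupt() in time.c below) can dispatch an IRQ without re-running the wrapper's entry/exit bookkeeping. A minimal sketch of the resulting call structure (simplified; not the kernel's actual macro expansion):

struct pt_regs;

/* Raw worker: does only the dispatch, so it is safe to call from a
 * context that has already performed interrupt entry accounting. */
void __do_IRQ(struct pt_regs *regs)
{
        /* ... find the pending interrupt and run its handler ... */
}

/* Wrapper invoked by the low-level interrupt entry path; roughly what
 * DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) generates. */
void do_IRQ(struct pt_regs *regs)
{
        /* entry accounting (irq_enter(), tracing, ...) */
        __do_IRQ(regs);
        /* exit accounting */
}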

arch/powerpc/kernel/kprobes.c (+2 -1)

         if (user_mode(regs))
                 return 0;

-        if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+        if (!IS_ENABLED(CONFIG_BOOKE) &&
+            (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                 return 0;

         /*
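
IS_ENABLED() (from <linux/kconfig.h>) evaluates to a compile-time 0 or 1 depending on whether a Kconfig option is built in, so the MSR_IR/MSR_DR real-mode check is compiled out entirely on BookE, where those translation bits are not set the way the check assumes. A userspace illustration of the pattern, with IS_ENABLED() stubbed out since the real macro depends on Kconfig headers:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(): a constant expression that
 * is 1 when the option is enabled. Faked here so this compiles. */
#define CONFIG_BOOKE 1          /* pretend this is a BookE build */
#define IS_ENABLED(option) (option)

static int kprobe_allowed(unsigned long msr_ir, unsigned long msr_dr)
{
        /* On non-BookE, refuse to probe with translation off; on BookE
         * the whole branch folds away at compile time. */
        if (!IS_ENABLED(CONFIG_BOOKE) && (!msr_ir || !msr_dr))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", kprobe_allowed(0, 0));   /* prints 1 on BookE builds */
        return 0;
}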

arch/powerpc/kernel/sysfs.c (+1 -1)

          * CPU. For instance, the boot cpu might never be valid
          * for hotplugging.
          */
-        if (smp_ops->cpu_offline_self)
+        if (smp_ops && smp_ops->cpu_offline_self)
                 c->hotpluggable = 1;
 #endif

arch/powerpc/kernel/time.c (+1 -1)

 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
         if (atomic_read(&ppc_n_lost_interrupts) != 0)
-                do_IRQ(regs);
+                __do_IRQ(regs);
 #endif

         old_regs = set_irq_regs(regs);

arch/powerpc/kernel/traps.c (+7 -2)

         _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }

-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
         clear_single_step(regs);
         clear_br_trace(regs);
···
         _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }

+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+        __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
···
 static void emulate_single_step(struct pt_regs *regs)
 {
         if (single_stepping(regs))
-                single_step_exception(regs);
+                __single_step_exception(regs);
 }

 static inline int __parse_fpscr(unsigned long fpscr)
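
Same pattern as irq.c, and for the same reason: emulate_single_step() runs inside other interrupt handlers (for example after program_check_exception() emulates an instruction), so it must not re-enter the wrapper. Approximately, DEFINE_INTERRUPT_HANDLER() expands along these lines (a simplified sketch of arch/powerpc/include/asm/interrupt.h; the real macro also handles kprobe blacklisting and carries more state):

struct pt_regs;
static void interrupt_enter_prepare(struct pt_regs *regs) { /* stub */ }
static void interrupt_exit_prepare(struct pt_regs *regs) { /* stub */ }

#define DEFINE_INTERRUPT_HANDLER(func)                          \
static void ____##func(struct pt_regs *regs);                   \
                                                                \
void func(struct pt_regs *regs)                                 \
{                                                               \
        interrupt_enter_prepare(regs); /* must run once */      \
        ____##func(regs);                                       \
        interrupt_exit_prepare(regs);  /* must run once */      \
}                                                               \
                                                                \
static void ____##func(struct pt_regs *regs)

/* The wrapper's enter/exit steps would nest if another handler called
 * single_step_exception() directly; hence the raw helper in the patch. */
DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
        /* handler body */
}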

arch/powerpc/platforms/pseries/setup.c (+3 -2)

          * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
          * H_CPU_BEHAV_FAVOUR_SECURITY is.
          */
-        if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+        if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
                 security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-        else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+                pseries_security_flavor = 0;
+        } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
                 pseries_security_flavor = 1;
         else
                 pseries_security_flavor = 2;

arch/powerpc/sysdev/xive/common.c (+24 -11)

 static struct xive_ipi_desc {
         unsigned int irq;
         char name[16];
+        atomic_t started;
 } *xive_ipis;

 /*
···
         .alloc = xive_ipi_irq_domain_alloc,
 };

-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
         struct fwnode_handle *fwnode;
         struct irq_domain *ipi_domain;
···
                 struct xive_ipi_desc *xid = &xive_ipis[node];
                 struct xive_ipi_alloc_info info = { node };

-                /* Skip nodes without CPUs */
-                if (cpumask_empty(cpumask_of_node(node)))
-                        continue;
-
                 /*
                  * Map one IPI interrupt per node for all cpus of that node.
                  * Since the HW interrupt number doesn't have any meaning,
···
                 xid->irq = ret;

                 snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-                ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                                  IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-                WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
         }

         return ret;
···
 out_free_fwnode:
         irq_domain_free_fwnode(fwnode);
 out:
+        return ret;
+}
+
+static int __init xive_request_ipi(unsigned int cpu)
+{
+        struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+        int ret;
+
+        if (atomic_inc_return(&xid->started) > 1)
+                return 0;
+
+        ret = request_irq(xid->irq, xive_muxed_ipi_action,
+                          IRQF_PERCPU | IRQF_NO_THREAD,
+                          xid->name, NULL);
+
+        WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
         return ret;
 }
···
         /* Check if we are already setup */
         if (xc->hw_ipi != XIVE_BAD_IRQ)
                 return 0;
+
+        /* Register the IPI */
+        xive_request_ipi(cpu);

         /* Grab an IPI from the backend, this will populate xc->hw_ipi */
         if (xive_ops->get_ipi(cpu, xc))
···
         if (xc->hw_ipi == XIVE_BAD_IRQ)
                 return;

+        /* TODO: clear IPI mapping */
+
         /* Mask the IPI */
         xive_do_source_set_mask(&xc->ipi_data, true);
···
         smp_ops->cause_ipi = xive_cause_ipi;

         /* Register the IPI */
-        xive_request_ipi();
+        xive_init_ipis();

         /* Allocate and setup IPI for the boot CPU */
         xive_setup_cpu_ipi(smp_processor_id());
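
The key device in this fix is the new started counter: every CPU coming online calls xive_request_ipi(), but atomic_inc_return() lets only the first caller per node perform the request_irq(), which is what makes late hotplug into a previously CPU-less node safe. A standalone sketch of the once-only idiom using C11 atomics (the names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int started;      /* one counter per shared resource */

/* First caller wins and does the one-time setup; any later or
 * concurrent caller observes a nonzero prior value and returns. */
static int setup_once(void)
{
        if (atomic_fetch_add(&started, 1) > 0)
                return 0;       /* already done (or in progress) */

        puts("registering the per-node IPI");
        return 0;
}

int main(void)
{
        setup_once();   /* performs the registration */
        setup_once();   /* no-op */
        return 0;
}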