Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
"A bit of a big batch, partly because I didn't send any last week, and
also just because the BPF fixes happened to land this week.

Summary:

- Fix a regression hit by the IPR SCSI driver, introduced by the
recent addition of MSI domains on pseries.

- A big series including 8 BPF fixes, some with potential security
impact and the rest various code generation issues.

- Fix our program check assembler entry path, which was accidentally
jumping into a gas macro and generating strange stack frames, which
could confuse find_bug().

- A couple of fixes, and related changes, to fix corner cases in our
machine check handling.

- Fix our DMA IOMMU ops, which were not always returning the optimal
DMA mask, leading to at least one device falling back to 32-bit DMA
when it shouldn't.

- A fix for KUAP handling on 32-bit Book3S.

- Fix crashes seen when kdumping on some pseries systems.

Thanks to Naveen N. Rao, Nicholas Piggin, Alexey Kardashevskiy, Cédric
Le Goater, Christophe Leroy, Mahesh Salgaonkar, Abdul Haleem,
Christoph Hellwig, Johan Almbladh, Stan Johnson"

* tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init
powerpc/32s: Fix kuap_kernel_restore()
powerpc/pseries/msi: Add an empty irq_write_msi_msg() handler
powerpc/64s: Fix unrecoverable MCE calling async handler from NMI
powerpc/64/interrupt: Reconcile soft-mask state in NMI and fix false BUG
powerpc/64: warn if local irqs are enabled in NMI or hardirq context
powerpc/traps: do not enable irqs in _exception
powerpc/64s: fix program check interrupt emergency stack path
powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000
powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit BPF_END
powerpc/bpf ppc32: Fix JMP32_JSET_K
powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation
powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC
powerpc/security: Add a helper to query stf_barrier type
powerpc/bpf: Fix BPF_SUB when imm == 0x80000000
powerpc/bpf: Fix BPF_MOD when imm == 1
powerpc/bpf: Validate branch ranges
powerpc/lib: Add helper to check if offset is within conditional branch range
powerpc/iommu: Report the correct most efficient DMA mask for PCI devices

+234 -75
+8
arch/powerpc/include/asm/book3s/32/kup.h
···
 	if (kuap_is_disabled())
 		return;
 
+	if (unlikely(kuap != KUAP_NONE)) {
+		current->thread.kuap = KUAP_NONE;
+		kuap_lock(kuap, false);
+	}
+
+	if (likely(regs->kuap == KUAP_NONE))
+		return;
+
 	current->thread.kuap = regs->kuap;
 
 	kuap_unlock(regs->kuap, false);
+1
arch/powerpc/include/asm/code-patching.h
···
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
 		  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+10 -8
arch/powerpc/include/asm/interrupt.h
···
 	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-	if (is_implicit_soft_masked(regs)) {
-		// Adjust regs->softe soft implicit soft-mask, so
-		// arch_irq_disabled_regs(regs) behaves as expected.
+	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+		/*
+		 * Adjust regs->softe to be soft-masked if it had not been
+		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+		 * not yet set disabled), or if it was in an implicit soft
+		 * masked state. This makes arch_irq_disabled_regs(regs)
+		 * behave as expected.
+		 */
 		regs->softe = IRQS_ALL_DISABLED;
 	}
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
 	/* Don't do any per-CPU operations until interrupt state is fixed */
 
···
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
+5
arch/powerpc/include/asm/security_features.h
···
 	return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
+9
arch/powerpc/kernel/dma-iommu.c
···
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
 
+	if (dev_is_pci(dev)) {
+		u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+		if (dma_iommu_dma_supported(dev, bypass_mask)) {
+			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+			return bypass_mask;
+		}
+	}
+
 	if (!tbl)
 		return 0;
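The effect is visible from the driver side: dma_get_required_mask() now reports the full bypass mask when direct mapping is usable, so a capable device is no longer steered into 32-bit translated DMA. A minimal sketch of the consuming pattern, using the generic DMA API; example_probe() and the surrounding driver shape are illustrative, not taken from the patch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical probe excerpt: ask the platform what mask it can serve
 * efficiently, then configure the device with it. On pseries, the fixed
 * dma_iommu_get_required_mask() now returns the 64-bit bypass mask when
 * dma_iommu_dma_supported() says bypass will work. */
static int example_probe(struct pci_dev *pdev)
{
	u64 mask = dma_get_required_mask(&pdev->dev);
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, mask);
	if (err) {
		/* Fall back to 32-bit addressing through the IOMMU window */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	dev_dbg(&pdev->dev, "using DMA mask 0x%llx\n", mask);
	return 0;
}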
+16 -9
arch/powerpc/kernel/exceptions-64s.S
···
 	li	r10,MSR_RI
 	mtmsrd	r10,1
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	machine_check_exception
+	bl	machine_check_exception_async
 	b	interrupt_return_srr
 
···
 	subi	r12,r12,1
 	sth	r12,PACA_IN_MCE(r13)
 
-	/* Invoke machine_check_exception to print MCE event and panic. */
+	/*
+	 * Invoke machine_check_exception to print MCE event and panic.
+	 * This is the NMI version of the handler because we are called from
+	 * the early handler which is a true NMI.
+	 */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	machine_check_exception
 
···
 	 */
 
 	andi.	r10,r12,MSR_PR
-	bne	2f			/* If userspace, go normal path */
+	bne	.Lnormal_stack		/* If userspace, go normal path */
 
 	andis.	r10,r12,(SRR1_PROGTM)@h
-	bne	1f			/* If TM, emergency */
+	bne	.Lemergency_stack	/* If TM, emergency */
 
 	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace */
-	blt	2f			/* normal path if not */
+	blt	.Lnormal_stack		/* normal path if not */
 
 	/* Use the emergency stack */
-1:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label */
+.Lemergency_stack:
+	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label */
 					/* 3 in EXCEPTION_PROLOG_COMMON */
 	mr	r10,r1			/* Save r1 */
 	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack */
 	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
 	__ISTACK(program_check)=0
 	__GEN_COMMON_BODY program_check
-	b	3f
-2:
+	b	.Ldo_program_check
+
+.Lnormal_stack:
 	__ISTACK(program_check)=1
 	__GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	program_check_exception
 	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+6
arch/powerpc/kernel/irq.c
···
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(in_nmi() || in_hardirq());
+
 	/*
 	 * After the stb, interrupts are unmasked and there are no interrupts
 	 * pending replay. The restart sequence makes this atomic with
···
 	irq_soft_mask_set(mask);
 	if (mask)
 		return;
+
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(in_nmi() || in_hardirq());
 
 	/*
 	 * From this point onward, we can take interrupts, preempt,
+5
arch/powerpc/kernel/security.c
···
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+	return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
+27 -16
arch/powerpc/kernel/traps.c
···
 		return false;
 	}
 
-	show_signal_msg(signr, regs, code, addr);
+	/*
+	 * Must not enable interrupts even for a user-mode exception, because
+	 * this can be called from a machine check, which may be an NMI or IRQ
+	 * which don't like interrupts being enabled. Could check for
+	 * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+	 * reason why _exception() should enable irqs for an exception handler;
+	 * the handlers themselves do that directly.
+	 */
 
-	if (arch_irqs_disabled())
-		interrupt_cond_local_irq_enable(regs);
+	show_signal_msg(signr, regs, code, addr);
 
 	current->thread.trap_nr = code;
 
···
 	 * do_exit() checks for in_interrupt() and panics in that case, so
 	 * exit the irq/nmi before calling die.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-		irq_exit();
-	else
+	if (in_nmi())
 		nmi_exit();
+	else
+		irq_exit();
 	die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time this is not true is if the early handler is
+ * unrecoverable, in which case it calls this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
 
···
 	/* Must die if the interrupt is not recoverable */
 	if (regs_is_unrecoverable(regs))
 		die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	return;
-#else
-	return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+	__machine_check_exception(regs);
+}
 #endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+	__machine_check_exception(regs);
+
+	return 0;
 }
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
+6 -1
arch/powerpc/lib/code-patching.c
···
 	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
···
 	offset = offset - (unsigned long)addr;
 
 	/* Check we can represent the target in the instruction format */
-	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+	if (!is_offset_in_cond_branch_range(offset))
 		return 1;
 
 	/* Mask out the flags and target, so they don't step on each other. */
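The encoding constraint behind the new helper: a powerpc conditional branch (B-form) carries a 16-bit signed, word-aligned displacement, so the reachable window is [-0x8000, 0x7ffc]. A standalone userspace sketch of the same check, with the kernel helper mirrored verbatim for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the helper added by the patch: 16-bit signed displacement,
 * low two bits must be zero (instructions are word aligned). */
static bool is_offset_in_cond_branch_range(long offset)
{
	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

int main(void)
{
	printf("%d\n", is_offset_in_cond_branch_range(0x7ffc));  /* 1: max forward */
	printf("%d\n", is_offset_in_cond_branch_range(-0x8000)); /* 1: max backward */
	printf("%d\n", is_offset_in_cond_branch_range(0x8000));  /* 0: out of range */
	printf("%d\n", is_offset_in_cond_branch_range(6));       /* 0: not word aligned */
	return 0;
}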
+21 -12
arch/powerpc/net/bpf_jit.h
···
 #define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			      \
-				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)							      \
+	do {								      \
+		long offset = (long)(dest) - (ctx->idx * 4);		      \
+		if (!is_offset_in_branch_range(offset)) {		      \
+			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+			return -ERANGE;					      \
+		}							      \
+		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));		      \
+	} while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)	EMIT(PPC_INST_BL |			      \
 				     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	      \
-					     (((cond) & 0x3ff) << 16) |	      \
-					     (((dest) - (ctx->idx * 4)) &     \
-					      0xfffc))
+#define PPC_BCC_SHORT(cond, dest)					      \
+	do {								      \
+		long offset = (long)(dest) - (ctx->idx * 4);		      \
+		if (!is_offset_in_cond_branch_range(offset)) {		      \
+			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+			return -ERANGE;					      \
+		}							      \
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+	} while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)		do {					      \
 		if ((int)(uintptr_t)(i) >= -32768 &&			      \
···
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-	return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP. If code size differs
···
  * state.
  */
 #define PPC_BCC(cond, dest)	do {					      \
-		if (is_nearbranch((dest) - (ctx->idx * 4))) {		      \
+		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
 			PPC_BCC_SHORT(cond, dest);			      \
 			EMIT(PPC_RAW_NOP());				      \
 		} else {						      \
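Worth noting: the reworked PPC_JMP() and PPC_BCC_SHORT() bodies contain a bare return -ERANGE, so the macros bail out of whichever function expands them and can only appear in functions returning int. That hidden control flow is why bpf_jit_emit_tail_call() and bpf_jit_build_body() pick up int return values in the bpf_jit_comp32.c and bpf_jit_comp64.c changes below.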
+4 -4
arch/powerpc/net/bpf_jit64.h
···
  * with our redzone usage.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 6*8		|
+ *		[   nv gpr save area	] 5*8		|
  *		[    tail_call_cnt	] 8		|
- *		[    local_tmp_var	] 8		|
+ *		[    local_tmp_var	] 16		|
  * fp (r31) -->	[   ebpf stack space	] up to 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[    stack pointer	] --------------
  */
 
 /* for gpr non volatile registers BPF_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE	(6*8)
+#define BPF_PPC_STACK_SAVE	(5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS	16
+#define BPF_PPC_STACK_LOCALS	24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
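The arithmetic shows the frame size itself is unchanged: the old layout summed to 6*8 + 8 + 8 = 64 bytes and the new one to 5*8 + 8 + 16 = 64. The save area only ever needs five slots, one each for BPF_REG_6 through BPF_REG_10, so the stray sixth slot is folded into local_tmp_var; the enlarged 16-byte local area is what provides the two 8-byte spill slots at -64(r1) and -56(r1) used by the stf-barrier fallback sequence added in bpf_jit_comp64.c below.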
+5 -1
arch/powerpc/net/bpf_jit_comp.c
···
 	/* Now build the prologue, body code & epilogue for real. */
 	cgctx.idx = 0;
 	bpf_jit_build_prologue(code_base, &cgctx);
-	bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+	if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+		bpf_jit_binary_free(bpf_hdr);
+		fp = org_fp;
+		goto out_addrs;
+	}
 	bpf_jit_build_epilogue(code_base, &cgctx);
 
 	if (bpf_jit_enable > 1)
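With the return value checked, a body-generation failure (for instance the new -ERANGE from branch validation) frees the partially built image via bpf_jit_binary_free() and resets fp to the original program, so the kernel falls back to the eBPF interpreter rather than running truncated JIT output.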
+10 -6
arch/powerpc/net/bpf_jit_comp32.c
···
 	}
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
 	/*
 	 * By now, the eBPF program has already setup parameters in r3-r6
···
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	EMIT(PPC_RAW_BCTR());
+
 	/* out: */
+	return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
···
 			PPC_LI32(_R0, imm);
 			EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
 		}
-		if (imm >= 0)
+		if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
 			EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
 		else
 			EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
···
 			EMIT(PPC_RAW_LI(dst_reg_h, 0));
 			break;
 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
-			EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
 			break;
 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 			bpf_set_seen_register(ctx, tmp_reg);
···
 			break;
 		case BPF_JMP32 | BPF_JSET | BPF_K:
 			/* andi does not sign-extend the immediate */
-			if (imm >= -32768 && imm < 32768) {
+			if (imm >= 0 && imm < 32768) {
 				/* PPC_ANDI is _only/always_ dot-form */
 				EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
 			} else {
···
 		 */
 		case BPF_JMP | BPF_TAIL_CALL:
 			ctx->seen |= SEEN_TAILCALL;
-			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			if (ret < 0)
+				return ret;
 			break;
 
 		default:
···
 			return -EOPNOTSUPP;
 		}
 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
-		    !insn_is_zext(&insn[i + 1]))
+		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
 			EMIT(PPC_RAW_LI(dst_reg_h, 0));
 	}
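The JMP32_JSET_K change is easiest to see with a concrete value: powerpc's andi. takes an unsigned 16-bit immediate and never sign-extends it, so a negative imm cannot use the short form. A standalone sketch of the discrepancy (values chosen for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dst = 0x00010000;
	int32_t imm = -2;	/* 0xfffffffe */

	/* andi. zero-extends its 16-bit immediate; it never sign-extends */
	uint32_t andi_result = dst & (uint16_t)imm;	/* dst & 0x0000fffe */
	uint32_t jset_result = dst & (uint32_t)imm;	/* dst & 0xfffffffe */

	printf("andi: 0x%08x, jset: 0x%08x\n", andi_result, jset_result);
	/* Prints 0x00000000 vs 0x00010000: the short form answers "bit not
	 * set" when BPF_JSET should say "set", hence the imm >= 0 check. */
	return 0;
}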
+82 -18
arch/powerpc/net/bpf_jit_comp64.c
···
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
···
  *		[	prev sp		] <-------------
  *		[	  ...		]		|
  * sp (r1) --->	[    stack pointer	] --------------
- *		[   nv gpr save area	] 6*8
+ *		[   nv gpr save area	] 5*8
  *		[    tail_call_cnt	] 8
- *		[    local_tmp_var	] 8
+ *		[    local_tmp_var	] 16
  *		[   unused red zone	] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
 	if (bpf_has_stack_frame(ctx))
 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 	else
-		return -(BPF_PPC_STACK_SAVE + 16);
+		return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-	return bpf_jit_stack_local(ctx) + 8;
+	return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
···
 	EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
 	/*
 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
···
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	EMIT(PPC_RAW_BCTR());
+
 	/* out: */
+	return 0;
 }
+
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"		.global bpf_stf_barrier		;"
+"	bpf_stf_barrier:			;"
+"		std	21,-64(1)		;"
+"		std	22,-56(1)		;"
+"		sync				;"
+"		ld	21,-64(1)		;"
+"		ld	22,-56(1)		;"
+"		ori	31,31,0			;"
+"		.rept 14			;"
+"		b	1f			;"
+"	1:					;"
+"		.endr				;"
+"		blr				;"
+);
 
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
 		       u32 *addrs, bool extra_pass)
 {
+	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
 	int i, ret;
···
 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
 			goto bpf_alu32_trunc;
 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+			if (!imm) {
+				goto bpf_alu32_trunc;
+			} else if (imm >= -32768 && imm < 32768) {
+				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+			} else {
+				PPC_LI32(b2p[TMP_REG_1], imm);
+				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+			}
+			goto bpf_alu32_trunc;
+		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-			if (BPF_OP(code) == BPF_SUB)
-				imm = -imm;
-			if (imm) {
-				if (imm >= -32768 && imm < 32768)
-					EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-				else {
-					PPC_LI32(b2p[TMP_REG_1], imm);
-					EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-				}
+			if (!imm) {
+				goto bpf_alu32_trunc;
+			} else if (imm > -32768 && imm <= 32768) {
+				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+			} else {
+				PPC_LI32(b2p[TMP_REG_1], imm);
+				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
 			}
 			goto bpf_alu32_trunc;
 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
···
 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 			if (imm == 0)
 				return -EINVAL;
-			else if (imm == 1)
-				goto bpf_alu32_trunc;
+			if (imm == 1) {
+				if (BPF_OP(code) == BPF_DIV) {
+					goto bpf_alu32_trunc;
+				} else {
+					EMIT(PPC_RAW_LI(dst_reg, 0));
+					break;
+				}
+			}
 
 			PPC_LI32(b2p[TMP_REG_1], imm);
 			switch (BPF_CLASS(code)) {
···
 		/*
 		 * BPF_ST NOSPEC (speculation barrier)
 		 */
 		case BPF_ST | BPF_NOSPEC:
+			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+			    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+				break;
+
+			switch (stf_barrier) {
+			case STF_BARRIER_EIEIO:
+				EMIT(PPC_RAW_EIEIO() | 0x02000000);
+				break;
+			case STF_BARRIER_SYNC_ORI:
+				EMIT(PPC_RAW_SYNC());
+				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+				break;
+			case STF_BARRIER_FALLBACK:
+				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+				EMIT(PPC_RAW_MTCTR(12));
+				EMIT(PPC_RAW_BCTRL());
+				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+				break;
+			case STF_BARRIER_NONE:
+				break;
+			}
 			break;
 
 		/*
···
 		 */
 		case BPF_JMP | BPF_TAIL_CALL:
 			ctx->seen |= SEEN_TAILCALL;
-			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			if (ret < 0)
+				return ret;
 			break;
 
 		default:
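Why imm == 0x80000000 needed a separate SUB path: the old lowering rewrote dst -= imm as dst += -imm, but negating INT32_MIN wraps back to itself in 32-bit two's complement, and after sign extension the 64-bit result is off by 2^32. A standalone demonstration in plain C (not JIT code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t dst = 0;
	int32_t imm = INT32_MIN;	/* 0x80000000 */

	/* Two's-complement negation wraps: -INT32_MIN == INT32_MIN
	 * (computed via unsigned arithmetic to avoid signed overflow) */
	int32_t neg = (int32_t)(0u - (uint32_t)imm);

	printf("dst - imm = 0x%016llx\n", (unsigned long long)(dst - imm));
	printf("dst + neg = 0x%016llx\n", (unsigned long long)(dst + neg));
	/* 0x0000000080000000 vs 0xffffffff80000000: off by 2^32, so the
	 * JIT can no longer lower SUB imm as ADD -imm for this value. */
	return 0;
}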
+4
arch/powerpc/platforms/pseries/eeh_pseries.c
···
 	if (is_kdump_kernel() || reset_devices) {
 		pr_info("Issue PHB reset ...\n");
 		list_for_each_entry(phb, &hose_list, list_node) {
+			// Skip if the slot is empty
+			if (list_empty(&PCI_DN(phb->dn)->child_list))
+				continue;
+
 			pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
 			config_addr = pseries_eeh_get_pe_config_addr(pdn);
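Without the guard, list_first_entry() on a childless PHB hands back a bogus pointer (the list head itself, cast to struct pci_dn), and the config-address lookup then dereferences it; that is the source of the kdump-time crash noted in the summary above.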
+15
arch/powerpc/platforms/pseries/msi.c
···
 	irq_chip_unmask_parent(d);
 }
 
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+	/*
+	 * Do not update the MSIx vector table. It's not strictly necessary
+	 * because the table is initialized by the underlying hypervisor, PowerVM
+	 * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+	 * activation will fail. This can happen in some drivers (eg. IPR) which
+	 * deactivate an IRQ used for testing MSI support.
+	 */
+	entry->msg = *msg;
+}
+
 static struct irq_chip pseries_pci_msi_irq_chip = {
 	.name		= "pSeries-PCI-MSI",
 	.irq_shutdown	= pseries_msi_shutdown,
 	.irq_mask	= pseries_msi_mask,
 	.irq_unmask	= pseries_msi_unmask,
 	.irq_eoi	= irq_chip_eoi_parent,
+	.irq_write_msi_msg	= pseries_msi_write_msg,
 };
 
 static struct msi_domain_info pseries_msi_domain_info = {