Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
"Fix a bug on pseries where spurious wakeups from H_PROD would prevent
partition migration from succeeding.

Fix oopses seen in pcpu_alloc(), caused by parallel faults of the
percpu mapping causing us to corrupt the protection key used for the
mapping, and cause a fatal key fault.

Thanks to Aneesh Kumar K.V, Murilo Opsfelder Araujo, and Nathan Lynch"

* tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/mm/book3s64: Use the correct storage key value when calling H_PROTECT
powerpc/pseries/mobility: handle premature return from H_JOIN
powerpc/pseries/mobility: use struct for shared state

+46 -5
+2 -1
arch/powerpc/platforms/pseries/lpar.c
```diff
@@ -887,7 +887,8 @@ (hunk around hpte_encode_avpn)
 	want_v = hpte_encode_avpn(vpn, psize, ssize);

-	flags = (newpp & 7) | H_AVPN;
+	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
 		/* Move pp0 into bit 8 (IBM 55) */
 		flags |= (newpp & HPTE_R_PP0) >> 55;
```
+44 -4
arch/powerpc/platforms/pseries/mobility.c
```diff
@@ -452,9 +452,24 @@
 	return ret;
 }

+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+	atomic_t counter;
+	bool done;
+};
+
 static int do_join(void *arg)
 {
-	atomic_t *counter = arg;
+	struct pseries_suspend_info *info = arg;
+	atomic_t *counter = &info->counter;
 	long hvrc;
 	int ret;

+retry:
 	/* Must ensure MSR.EE off for H_JOIN. */
 	hard_irq_disable();
 	hvrc = plpar_hcall_norets(H_JOIN);
@@ (H_JOIN return-code handling)
 	case H_SUCCESS:
 		/*
 		 * The suspend is complete and this cpu has received a
-		 * prod.
+		 * prod, or we've received a stray prod from unrelated
+		 * code (e.g. paravirt spinlocks) and we need to join
+		 * again.
+		 *
+		 * This barrier orders the return from H_JOIN above vs
+		 * the load of info->done. It pairs with the barrier
+		 * in the wakeup/prod path below.
 		 */
+		smp_mb();
+		if (READ_ONCE(info->done) == false) {
+			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+					    smp_processor_id());
+			goto retry;
+		}
 		ret = 0;
 		break;
 	case H_BAD_MODE:
@@ (wakeup/prod path)
 	if (atomic_inc_return(counter) == 1) {
 		pr_info("CPU %u waking all threads\n", smp_processor_id());
+		WRITE_ONCE(info->done, true);
+		/*
+		 * This barrier orders the store to info->done vs subsequent
+		 * H_PRODs to wake the other CPUs. It pairs with the barrier
+		 * in the H_SUCCESS case above.
+		 */
+		smp_mb();
 		prod_others();
 	}
 	/*
@@ (suspend retry loop)
 	int ret;

 	while (true) {
-		atomic_t counter = ATOMIC_INIT(0);
+		struct pseries_suspend_info info;
 		unsigned long vasi_state;
 		int vasi_err;

-		ret = stop_machine(do_join, &counter, cpu_online_mask);
+		info = (struct pseries_suspend_info) {
+			.counter = ATOMIC_INIT(0),
+			.done = false,
+		};
+
+		ret = stop_machine(do_join, &info, cpu_online_mask);
 		if (ret == 0)
 			break;
 		/*
```