Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

MIPS: mm: Rewrite TLB uniquification for the hidden bit feature

Before the introduction of the EHINV feature, which lets software mark
TLB entries invalid, certain older implementations of the MIPS ISA were
equipped, as a vendor extension, with an analogous bit that is hidden
from software and only ever set at reset; any subsequent software write
to the entry clears it, making the entry valid.
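
A toy model of these semantics (illustrative only; the real bit is by
definition invisible to software, and all the names below are made up
for the sketch):

#include <stdbool.h>

/* Hypothetical model of a TLB entry with the vendor hidden bit. */
struct hidden_bit_tlbent {
        unsigned long entryhi;  /* may be identical across entries */
        bool hidden;            /* no software-visible encoding */
};

/* Reset is the only event that sets the hidden bit... */
static void model_reset(struct hidden_bit_tlbent *e)
{
        e->hidden = true;       /* entry ignored by lookups */
}

/* ...and any TLB write to the entry clears it. */
static void model_tlbwi(struct hidden_bit_tlbent *e, unsigned long entryhi)
{
        e->entryhi = entryhi;
        e->hidden = false;      /* entry now participates in matches */
}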

This feature makes it unsafe to read a TLB entry with TLBR, modify the
page mask, and write the entry back with TLBWI, because this operation
implicitly clears the hidden bit and may therefore create a duplicate
entry: with the hidden bit present there is no guarantee that the
entries across the TLB are all unique.
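
This read-modify-write sequence is exactly what the previous
implementation did, as the removed lines in the diff below show;
sketched here as a fragment, with `i' standing for the TLB index
being processed:

/* Unsafe with the hidden bit: TLBR followed by TLBWI clears it,
 * implicitly validating a possibly duplicate entry. */
write_c0_index(i);
mtc0_tlbr_hazard();
tlb_read();
tlb_read_hazard();

/* Shrink any large page to the default page size... */
write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
mtc0_tlbw_hazard();
tlb_write_indexed();            /* ...and the hidden bit is now gone. */
tlbw_use_hazard();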

Usually the firmware has already uniquified the TLB entries before
handing control over, in which case we only need to guarantee at
bootstrap that no clash will happen with the VPN2 values chosen in
local_flush_tlb_all().
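
For reference, those bootstrap values come from UNIQUE_ENTRYHI(), which
gives each TLB index its own VPN2 in the kernel's unmapped CKSEG0
segment; in simplified form (the actual macro in
arch/mips/include/asm/tlb.h also ORs in the EHINV bit where the CPU
supports it):

/* One distinct double-page (VPN2) per TLB index, so the dummy
 * entries written by the flush can never match one another. */
#define UNIQUE_ENTRYHI(idx)     (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))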

However with systems such as Mikrotik RB532 we get handed the TLB as at
reset, with the hidden bit set across the entries and possibly duplicate
entries present. This then causes a machine check exception when page
sizes are reset in r4k_tlb_uniquify() and prevents the system from
booting.

Therefore rewrite the algorithm used in r4k_tlb_uniquify() so as to
avoid reusing ASID/VPN values across the TLB. Get rid of global entries
first, as they may be blocking the entire address space: e.g. 16 256MiB
pages (16 * 256MiB = 4GiB) exhaust the whole address space of a 32-bit
CPU, and a single big page can exhaust the 32-bit compatibility space
on a 64-bit CPU.
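
As a rough illustration of the resulting sweep, here is a toy userspace
model (not the kernel code: it assumes single-page entries to avoid, an
already-sorted avoid list, and a 256-entry ASID space):

#include <stdio.h>

#define NASID 256       /* assumed ASID space size for the demo */

struct ent {
        unsigned long vpn;      /* in VPN2 (double-page) units */
        unsigned int asid;
};

/* Hand out unique (VPN, ASID) pairs in increasing order, stepping the
 * VPN past a sorted list of single-page "avoid" entries that stand in
 * for wired/global entries. */
static void assign_unique(struct ent *out, int n,
                          const struct ent *avoid, int navoid)
{
        unsigned long vpn = 0;
        unsigned int asid = 0;
        int a = 0;

        for (int i = 0; i < n; i++) {
                /* Step past any VPN we must not reuse. */
                while (a < navoid && vpn >= avoid[a].vpn) {
                        if (vpn == avoid[a].vpn)
                                vpn++;
                        asid = 0;
                        a++;
                }
                out[i].vpn = vpn;
                out[i].asid = asid;
                if (++asid == NASID) {  /* ASIDs exhausted: next VPN */
                        asid = 0;
                        vpn++;
                }
        }
}

int main(void)
{
        const struct ent avoid[] = { { 0, 0 }, { 2, 0 } };
        struct ent out[4];

        assign_unique(out, 4, avoid, 2);
        for (int i = 0; i < 4; i++)
                printf("vpn %lu asid %u\n", out[i].vpn, out[i].asid);
        return 0;
}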

Details of the chosen algorithm are given in the code itself.

Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
Cc: stable@vger.kernel.org # v6.18+
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>

Authored by Maciej W. Rozycki, committed by Thomas Bogendoerfer (540760b7 74283cfe).

+228 -54
arch/mips/mm/tlb-r4k.c
···
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/memblock.h>
+#include <linux/minmax.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/export.h>
···
 #include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
+#include <asm/tlbdebug.h>
 #include <asm/tlbex.h>
 #include <asm/tlbmisc.h>
 #include <asm/setup.h>
···
 __setup("ntlb=", set_ntlb);


-/* Comparison function for EntryHi VPN fields. */
-static int r4k_vpn_cmp(const void *a, const void *b)
+/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
+#define VPN2_SHIFT 13
+
+/* Read full EntryHi even with CONFIG_32BIT. */
+static inline unsigned long long read_c0_entryhi_native(void)
 {
-        long v = *(unsigned long *)a - *(unsigned long *)b;
-        int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
-        return s ? (v != 0) | v >> s : v;
+        return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
+}
+
+/* Write full EntryHi even with CONFIG_32BIT. */
+static inline void write_c0_entryhi_native(unsigned long long v)
+{
+        if (cpu_has_64bits)
+                write_c0_entryhi_64(v);
+        else
+                write_c0_entryhi(v);
+}
+
+/* TLB entry state for uniquification. */
+struct tlbent {
+        unsigned long long wired:1;
+        unsigned long long global:1;
+        unsigned long long asid:10;
+        unsigned long long vpn:51;
+        unsigned long long pagesz:5;
+        unsigned long long index:14;
+};
+
+/*
+ * Comparison function for TLB entry sorting.  Place wired entries first,
+ * then global entries, then order by the increasing VPN/ASID and the
+ * decreasing page size.  This lets us avoid clashes with wired entries
+ * easily and get entries for larger pages out of the way first.
+ *
+ * We could group bits so as to reduce the number of comparisons, but this
+ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+static int r4k_entry_cmp(const void *a, const void *b)
+{
+        struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
+
+        if (ea.wired > eb.wired)
+                return -1;
+        else if (ea.wired < eb.wired)
+                return 1;
+        else if (ea.global > eb.global)
+                return -1;
+        else if (ea.global < eb.global)
+                return 1;
+        else if (ea.vpn < eb.vpn)
+                return -1;
+        else if (ea.vpn > eb.vpn)
+                return 1;
+        else if (ea.asid < eb.asid)
+                return -1;
+        else if (ea.asid > eb.asid)
+                return 1;
+        else if (ea.pagesz > eb.pagesz)
+                return -1;
+        else if (ea.pagesz < eb.pagesz)
+                return 1;
+        else
+                return 0;
+}
+
+/*
+ * Fetch all the TLB entries.  Mask individual VPN values retrieved with
+ * the corresponding page mask and ignoring any 1KiB extension as we'll
+ * be using 4KiB pages for uniquification.
+ */
+static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
+{
+        int start = num_wired_entries();
+        unsigned long long vpn_mask;
+        bool global;
+        int i;
+
+        vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
+        vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+        for (i = 0; i < tlbsize; i++) {
+                unsigned long long entryhi, vpn, mask, asid;
+                unsigned int pagesz;
+
+                write_c0_index(i);
+                mtc0_tlbr_hazard();
+                tlb_read();
+                tlb_read_hazard();
+
+                global = !!(read_c0_entrylo0() & ENTRYLO_G);
+                entryhi = read_c0_entryhi_native();
+                mask = read_c0_pagemask();
+
+                asid = entryhi & cpu_asid_mask(&current_cpu_data);
+                vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
+                pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
+
+                tlb_vpns[i].global = global;
+                tlb_vpns[i].asid = global ? 0 : asid;
+                tlb_vpns[i].vpn = vpn;
+                tlb_vpns[i].pagesz = pagesz;
+                tlb_vpns[i].wired = i < start;
+                tlb_vpns[i].index = i;
+        }
+}
+
+/*
+ * Write unique values to all but the wired TLB entries each, using
+ * the 4KiB page size.  This size might not be supported with R6, but
+ * EHINV is mandatory for R6, so we won't ever be called in that case.
+ *
+ * A sorted table is supplied with any wired entries at the beginning,
+ * followed by any global entries, and then finally regular entries.
+ * We start at the VPN and ASID values of zero and only assign user
+ * addresses, therefore guaranteeing no clash with addresses produced
+ * by UNIQUE_ENTRYHI.  We avoid any VPN values used by wired or global
+ * entries, by increasing the VPN value beyond the span of such entry.
+ *
+ * When a VPN/ASID clash is found with a regular entry we increment the
+ * ASID instead until no VPN/ASID clash has been found or the ASID space
+ * has been exhausted, in which case we increase the VPN value beyond
+ * the span of the largest clashing entry.
+ *
+ * We do not need to be concerned about FTLB or MMID configurations as
+ * those are required to implement the EHINV feature.
+ */
+static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
+{
+        unsigned long long asid, vpn, vpn_size, pagesz;
+        int widx, gidx, idx, sidx, lidx, i;
+
+        vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
+        pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
+
+        write_c0_pagemask(PM_4K);
+        write_c0_entrylo0(0);
+        write_c0_entrylo1(0);
+
+        asid = 0;
+        vpn = 0;
+        widx = 0;
+        gidx = 0;
+        for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
+                ;
+        for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
+                ;
+        idx = gidx = sidx + 1;
+        for (i = sidx; i < tlbsize; i++) {
+                unsigned long long entryhi, vpn_pagesz = 0;
+
+                while (1) {
+                        if (WARN_ON(vpn >= vpn_size)) {
+                                dump_tlb_all();
+                                /* Pray local_flush_tlb_all() will cope. */
+                                return;
+                        }
+
+                        /* VPN must be below the next wired entry. */
+                        if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
+                                vpn = max(vpn,
+                                          (tlb_vpns[widx].vpn +
+                                           (1ULL << tlb_vpns[widx].pagesz)));
+                                asid = 0;
+                                widx++;
+                                continue;
+                        }
+                        /* VPN must be below the next global entry. */
+                        if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
+                                vpn = max(vpn,
+                                          (tlb_vpns[gidx].vpn +
+                                           (1ULL << tlb_vpns[gidx].pagesz)));
+                                asid = 0;
+                                gidx++;
+                                continue;
+                        }
+                        /* Try to find a free ASID so as to conserve VPNs. */
+                        if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
+                            asid == tlb_vpns[idx].asid) {
+                                unsigned long long idx_pagesz;
+
+                                idx_pagesz = tlb_vpns[idx].pagesz;
+                                vpn_pagesz = max(vpn_pagesz, idx_pagesz);
+                                do
+                                        idx++;
+                                while (idx < tlbsize &&
+                                       vpn == tlb_vpns[idx].vpn &&
+                                       asid == tlb_vpns[idx].asid);
+                                asid++;
+                                if (asid > cpu_asid_mask(&current_cpu_data)) {
+                                        vpn += vpn_pagesz;
+                                        asid = 0;
+                                        vpn_pagesz = 0;
+                                }
+                                continue;
+                        }
+                        /* VPN mustn't be above the next regular entry. */
+                        if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
+                                vpn = max(vpn,
+                                          (tlb_vpns[idx].vpn +
+                                           (1ULL << tlb_vpns[idx].pagesz)));
+                                asid = 0;
+                                idx++;
+                                continue;
+                        }
+                        break;
+                }
+
+                entryhi = (vpn << VPN2_SHIFT) | asid;
+                write_c0_entryhi_native(entryhi);
+                write_c0_index(tlb_vpns[i].index);
+                mtc0_tlbw_hazard();
+                tlb_write_indexed();
+
+                tlb_vpns[i].asid = asid;
+                tlb_vpns[i].vpn = vpn;
+                tlb_vpns[i].pagesz = pagesz;
+
+                asid++;
+                if (asid > cpu_asid_mask(&current_cpu_data)) {
+                        vpn += 1ULL << pagesz;
+                        asid = 0;
+                }
+        }
 }

 /*
···
 {
         int tlbsize = current_cpu_data.tlbsize;
         bool use_slab = slab_is_available();
-        int start = num_wired_entries();
         phys_addr_t tlb_vpn_size;
-        unsigned long *tlb_vpns;
-        unsigned long vpn_mask;
-        int cnt, ent, idx, i;
-
-        vpn_mask = GENMASK(cpu_vmbits - 1, 13);
-        vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+        struct tlbent *tlb_vpns;

         tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
         tlb_vpns = (use_slab ?
···

         htw_stop();

-        for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
-                unsigned long vpn;
+        r4k_tlb_uniquify_read(tlb_vpns, tlbsize);

-                write_c0_index(i);
-                mtc0_tlbr_hazard();
-                tlb_read();
-                tlb_read_hazard();
-                vpn = read_c0_entryhi();
-                vpn &= vpn_mask & PAGE_MASK;
-                tlb_vpns[cnt] = vpn;
+        sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);

-                /* Prevent any large pages from overlapping regular ones. */
-                write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
-                mtc0_tlbw_hazard();
-                tlb_write_indexed();
-                tlbw_use_hazard();
-        }
-
-        sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+        r4k_tlb_uniquify_write(tlb_vpns, tlbsize);

         write_c0_pagemask(PM_DEFAULT_MASK);
-        write_c0_entrylo0(0);
-        write_c0_entrylo1(0);
-
-        idx = 0;
-        ent = tlbsize;
-        for (i = start; i < tlbsize; i++)
-                while (1) {
-                        unsigned long entryhi, vpn;
-
-                        entryhi = UNIQUE_ENTRYHI(ent);
-                        vpn = entryhi & vpn_mask & PAGE_MASK;
-
-                        if (idx >= cnt || vpn < tlb_vpns[idx]) {
-                                write_c0_entryhi(entryhi);
-                                write_c0_index(i);
-                                mtc0_tlbw_hazard();
-                                tlb_write_indexed();
-                                ent++;
-                                break;
-                        } else if (vpn == tlb_vpns[idx]) {
-                                ent++;
-                        } else {
-                                idx++;
-                        }
-                }

         tlbw_use_hazard();
         htw_start();