Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

memblock tests: add generic NUMA tests for memblock_alloc_exact_nid_raw

Add tests for memblock_alloc_exact_nid_raw() where the simulated physical
memory is set up with multiple NUMA nodes. Additionally, all but one of
these tests set nid != NUMA_NO_NODE. All tests are run for both top-down
and bottom-up allocation directions.

The tested scenarios are:

Range unrestricted:
- region cannot be allocated:
+ there are no previously reserved regions, but the requested node is
too small
+ the requested node is fully reserved
+ the requested node is partially reserved and does not have
enough space
+ none of the nodes have enough memory to allocate the region

Range restricted:
- region can be allocated in the specific node requested without
dropping min_addr:
+ the range fully overlaps with the node, and there are adjacent
reserved regions
- region cannot be allocated:
+ range partially overlaps with two different nodes, where the
second node is the requested node
+ range overlaps with multiple nodes along node boundaries, and
the requested node starts after max_addr
+ nid is set to NUMA_NO_NODE and the total range can fit the
region, but the range is split between two nodes and everything
else is reserved

Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Rebecca Mckeever <remckee0@gmail.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/51b14da46e6591428df3aefc5acc7dca9341a541.1667802195.git.remckee0@gmail.com

authored by

Rebecca Mckeever and committed by
Mike Rapoport
62bdc990 b6df23ed

+465
+465
tools/testing/memblock/tests/alloc_exact_nid_api.c
··· 560 560 return 0; 561 561 } 562 562 563 + /* 564 + * A test that tries to allocate a memory region in a specific NUMA node that 565 + * does not have enough memory to allocate a region of the requested size: 566 + * 567 + * | +-----+ | 568 + * | | req | | 569 + * +---+-----+----------------------------+ 570 + * 571 + * +---------+ 572 + * | rgn | 573 + * +---------+ 574 + * 575 + * Expect no allocation to happen. 576 + */ 577 + static int alloc_exact_nid_numa_small_node_generic_check(void) 578 + { 579 + int nid_req = 1; 580 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 581 + void *allocated_ptr = NULL; 582 + phys_addr_t size; 583 + phys_addr_t min_addr; 584 + phys_addr_t max_addr; 585 + 586 + PREFIX_PUSH(); 587 + setup_numa_memblock(node_fractions); 588 + 589 + size = SZ_2 * req_node->size; 590 + min_addr = memblock_start_of_DRAM(); 591 + max_addr = memblock_end_of_DRAM(); 592 + 593 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 594 + min_addr, max_addr, 595 + nid_req); 596 + 597 + ASSERT_EQ(allocated_ptr, NULL); 598 + 599 + test_pass_pop(); 600 + 601 + return 0; 602 + } 603 + 604 + /* 605 + * A test that tries to allocate a memory region in a specific NUMA node that 606 + * is fully reserved: 607 + * 608 + * | +---------+ | 609 + * | |requested| | 610 + * +--------------+---------+-------------+ 611 + * 612 + * | +---------+ | 613 + * | | reserved| | 614 + * +--------------+---------+-------------+ 615 + * 616 + * Expect no allocation to happen. 
617 + */ 618 + static int alloc_exact_nid_numa_node_reserved_generic_check(void) 619 + { 620 + int nid_req = 2; 621 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 622 + void *allocated_ptr = NULL; 623 + phys_addr_t size; 624 + phys_addr_t min_addr; 625 + phys_addr_t max_addr; 626 + 627 + PREFIX_PUSH(); 628 + setup_numa_memblock(node_fractions); 629 + 630 + size = req_node->size; 631 + min_addr = memblock_start_of_DRAM(); 632 + max_addr = memblock_end_of_DRAM(); 633 + 634 + memblock_reserve(req_node->base, req_node->size); 635 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 636 + min_addr, max_addr, 637 + nid_req); 638 + 639 + ASSERT_EQ(allocated_ptr, NULL); 640 + 641 + test_pass_pop(); 642 + 643 + return 0; 644 + } 645 + 646 + /* 647 + * A test that tries to allocate a memory region in a specific NUMA node that 648 + * is partially reserved and does not have enough contiguous memory for the 649 + * allocated region: 650 + * 651 + * | +-----------------------+ | 652 + * | | requested | | 653 + * +-----------+-----------------------+----+ 654 + * 655 + * | +----------+ | 656 + * | | reserved | | 657 + * +-----------------+----------+-----------+ 658 + * 659 + * Expect no allocation to happen. 
660 + */ 661 + static int alloc_exact_nid_numa_part_reserved_fail_generic_check(void) 662 + { 663 + int nid_req = 4; 664 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 665 + void *allocated_ptr = NULL; 666 + struct region r1; 667 + phys_addr_t size; 668 + phys_addr_t min_addr; 669 + phys_addr_t max_addr; 670 + 671 + PREFIX_PUSH(); 672 + setup_numa_memblock(node_fractions); 673 + 674 + ASSERT_LE(SZ_4, req_node->size); 675 + size = req_node->size / SZ_2; 676 + r1.base = req_node->base + (size / SZ_2); 677 + r1.size = size; 678 + 679 + min_addr = memblock_start_of_DRAM(); 680 + max_addr = memblock_end_of_DRAM(); 681 + 682 + memblock_reserve(r1.base, r1.size); 683 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 684 + min_addr, max_addr, 685 + nid_req); 686 + 687 + ASSERT_EQ(allocated_ptr, NULL); 688 + 689 + test_pass_pop(); 690 + 691 + return 0; 692 + } 693 + 694 + /* 695 + * A test that tries to allocate a memory region that spans over the min_addr 696 + * and max_addr range and overlaps with two different nodes, where the second 697 + * node is the requested node: 698 + * 699 + * min_addr 700 + * | max_addr 701 + * | | 702 + * v v 703 + * | +--------------------------+---------+ | 704 + * | | first node |requested| | 705 + * +------+--------------------------+---------+----------------+ 706 + * 707 + * Expect no allocation to happen. 
708 + */ 709 + static int alloc_exact_nid_numa_split_range_high_generic_check(void) 710 + { 711 + int nid_req = 3; 712 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 713 + void *allocated_ptr = NULL; 714 + phys_addr_t size = SZ_512; 715 + phys_addr_t min_addr; 716 + phys_addr_t max_addr; 717 + 718 + PREFIX_PUSH(); 719 + setup_numa_memblock(node_fractions); 720 + 721 + min_addr = req_node->base - SZ_256; 722 + max_addr = min_addr + size; 723 + 724 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 725 + min_addr, max_addr, 726 + nid_req); 727 + 728 + ASSERT_EQ(allocated_ptr, NULL); 729 + 730 + test_pass_pop(); 731 + 732 + return 0; 733 + } 734 + 735 + /* 736 + * A test that tries to allocate memory within min_addr and max_add range when 737 + * the requested node and the range do not overlap, and requested node starts 738 + * after max_addr. The range overlaps with multiple nodes along node 739 + * boundaries: 740 + * 741 + * min_addr 742 + * | max_addr 743 + * | | 744 + * v v 745 + * | +----------+----...----+----------+ +-----------+ | 746 + * | | min node | ... | max node | | requested | | 747 + * +-----+----------+----...----+----------+--------+-----------+---+ 748 + * 749 + * Expect no allocation to happen. 
750 + */ 751 + static int alloc_exact_nid_numa_no_overlap_high_generic_check(void) 752 + { 753 + int nid_req = 7; 754 + struct memblock_region *min_node = &memblock.memory.regions[2]; 755 + struct memblock_region *max_node = &memblock.memory.regions[5]; 756 + void *allocated_ptr = NULL; 757 + phys_addr_t size = SZ_64; 758 + phys_addr_t max_addr; 759 + phys_addr_t min_addr; 760 + 761 + PREFIX_PUSH(); 762 + setup_numa_memblock(node_fractions); 763 + 764 + min_addr = min_node->base; 765 + max_addr = region_end(max_node); 766 + 767 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 768 + min_addr, max_addr, 769 + nid_req); 770 + 771 + ASSERT_EQ(allocated_ptr, NULL); 772 + 773 + test_pass_pop(); 774 + 775 + return 0; 776 + } 777 + 778 + /* 779 + * A test that tries to allocate a memory region in a specific NUMA node that 780 + * does not have enough memory to allocate a region of the requested size. 781 + * Additionally, none of the nodes have enough memory to allocate the region: 782 + * 783 + * +-----------------------------------+ 784 + * | new | 785 + * +-----------------------------------+ 786 + * |-------+-------+-------+-------+-------+-------+-------+-------| 787 + * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 | 788 + * +-------+-------+-------+-------+-------+-------+-------+-------+ 789 + * 790 + * Expect no allocation to happen. 
791 + */ 792 + static int alloc_exact_nid_numa_large_region_generic_check(void) 793 + { 794 + int nid_req = 3; 795 + void *allocated_ptr = NULL; 796 + phys_addr_t size = MEM_SIZE / SZ_2; 797 + phys_addr_t min_addr; 798 + phys_addr_t max_addr; 799 + 800 + PREFIX_PUSH(); 801 + setup_numa_memblock(node_fractions); 802 + 803 + min_addr = memblock_start_of_DRAM(); 804 + max_addr = memblock_end_of_DRAM(); 805 + 806 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 807 + min_addr, max_addr, 808 + nid_req); 809 + ASSERT_EQ(allocated_ptr, NULL); 810 + 811 + test_pass_pop(); 812 + 813 + return 0; 814 + } 815 + 816 + /* 817 + * A test that tries to allocate memory within min_addr and max_addr range when 818 + * there are two reserved regions at the borders. The requested node starts at 819 + * min_addr and ends at max_addr and is the same size as the region to be 820 + * allocated: 821 + * 822 + * min_addr 823 + * | max_addr 824 + * | | 825 + * v v 826 + * | +-----------+-----------------------+-----------------------| 827 + * | | node5 | requested | node7 | 828 + * +------+-----------+-----------------------+-----------------------+ 829 + * + + 830 + * | +----+-----------------------+----+ | 831 + * | | r2 | new | r1 | | 832 + * +-------------+----+-----------------------+----+------------------+ 833 + * 834 + * Expect to merge all of the regions into one. The region counter and total 835 + * size fields get updated. 
836 + */ 837 + static int alloc_exact_nid_numa_reserved_full_merge_generic_check(void) 838 + { 839 + int nid_req = 6; 840 + int nid_next = nid_req + 1; 841 + struct memblock_region *new_rgn = &memblock.reserved.regions[0]; 842 + struct memblock_region *req_node = &memblock.memory.regions[nid_req]; 843 + struct memblock_region *next_node = &memblock.memory.regions[nid_next]; 844 + void *allocated_ptr = NULL; 845 + struct region r1, r2; 846 + phys_addr_t size = req_node->size; 847 + phys_addr_t total_size; 848 + phys_addr_t max_addr; 849 + phys_addr_t min_addr; 850 + 851 + PREFIX_PUSH(); 852 + setup_numa_memblock(node_fractions); 853 + 854 + r1.base = next_node->base; 855 + r1.size = SZ_128; 856 + 857 + r2.size = SZ_128; 858 + r2.base = r1.base - (size + r2.size); 859 + 860 + total_size = r1.size + r2.size + size; 861 + min_addr = r2.base + r2.size; 862 + max_addr = r1.base; 863 + 864 + memblock_reserve(r1.base, r1.size); 865 + memblock_reserve(r2.base, r2.size); 866 + 867 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 868 + min_addr, max_addr, 869 + nid_req); 870 + 871 + ASSERT_NE(allocated_ptr, NULL); 872 + ASSERT_MEM_NE(allocated_ptr, 0, size); 873 + 874 + ASSERT_EQ(new_rgn->size, total_size); 875 + ASSERT_EQ(new_rgn->base, r2.base); 876 + 877 + ASSERT_LE(new_rgn->base, req_node->base); 878 + ASSERT_LE(region_end(req_node), region_end(new_rgn)); 879 + 880 + ASSERT_EQ(memblock.reserved.cnt, 1); 881 + ASSERT_EQ(memblock.reserved.total_size, total_size); 882 + 883 + test_pass_pop(); 884 + 885 + return 0; 886 + } 887 + 888 + /* 889 + * A test that tries to allocate memory within min_addr and max_add range, 890 + * where the total range can fit the region, but it is split between two nodes 891 + * and everything else is reserved. 
Additionally, nid is set to NUMA_NO_NODE 892 + * instead of requesting a specific node: 893 + * 894 + * +-----------+ 895 + * | new | 896 + * +-----------+ 897 + * | +---------------------+-----------| 898 + * | | prev node | next node | 899 + * +------+---------------------+-----------+ 900 + * + + 901 + * |----------------------+ +-----| 902 + * | r1 | | r2 | 903 + * +----------------------+-----------+-----+ 904 + * ^ ^ 905 + * | | 906 + * | max_addr 907 + * | 908 + * min_addr 909 + * 910 + * Expect no allocation to happen. 911 + */ 912 + static int alloc_exact_nid_numa_split_all_reserved_generic_check(void) 913 + { 914 + void *allocated_ptr = NULL; 915 + struct memblock_region *next_node = &memblock.memory.regions[7]; 916 + struct region r1, r2; 917 + phys_addr_t size = SZ_256; 918 + phys_addr_t max_addr; 919 + phys_addr_t min_addr; 920 + 921 + PREFIX_PUSH(); 922 + setup_numa_memblock(node_fractions); 923 + 924 + r2.base = next_node->base + SZ_128; 925 + r2.size = memblock_end_of_DRAM() - r2.base; 926 + 927 + r1.size = MEM_SIZE - (r2.size + size); 928 + r1.base = memblock_start_of_DRAM(); 929 + 930 + min_addr = r1.base + r1.size; 931 + max_addr = r2.base; 932 + 933 + memblock_reserve(r1.base, r1.size); 934 + memblock_reserve(r2.base, r2.size); 935 + 936 + allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 937 + min_addr, max_addr, 938 + NUMA_NO_NODE); 939 + 940 + ASSERT_EQ(allocated_ptr, NULL); 941 + 942 + test_pass_pop(); 943 + 944 + return 0; 945 + } 946 + 563 947 /* Test case wrappers for NUMA tests */ 564 948 static int alloc_exact_nid_numa_simple_check(void) 565 949 { ··· 1000 616 return 0; 1001 617 } 1002 618 619 + static int alloc_exact_nid_numa_small_node_check(void) 620 + { 621 + test_print("\tRunning %s...\n", __func__); 622 + run_top_down(alloc_exact_nid_numa_small_node_generic_check); 623 + run_bottom_up(alloc_exact_nid_numa_small_node_generic_check); 624 + 625 + return 0; 626 + } 627 + 628 + static int 
alloc_exact_nid_numa_node_reserved_check(void) 629 + { 630 + test_print("\tRunning %s...\n", __func__); 631 + run_top_down(alloc_exact_nid_numa_node_reserved_generic_check); 632 + run_bottom_up(alloc_exact_nid_numa_node_reserved_generic_check); 633 + 634 + return 0; 635 + } 636 + 637 + static int alloc_exact_nid_numa_part_reserved_fail_check(void) 638 + { 639 + test_print("\tRunning %s...\n", __func__); 640 + run_top_down(alloc_exact_nid_numa_part_reserved_fail_generic_check); 641 + run_bottom_up(alloc_exact_nid_numa_part_reserved_fail_generic_check); 642 + 643 + return 0; 644 + } 645 + 646 + static int alloc_exact_nid_numa_split_range_high_check(void) 647 + { 648 + test_print("\tRunning %s...\n", __func__); 649 + run_top_down(alloc_exact_nid_numa_split_range_high_generic_check); 650 + run_bottom_up(alloc_exact_nid_numa_split_range_high_generic_check); 651 + 652 + return 0; 653 + } 654 + 655 + static int alloc_exact_nid_numa_no_overlap_high_check(void) 656 + { 657 + test_print("\tRunning %s...\n", __func__); 658 + run_top_down(alloc_exact_nid_numa_no_overlap_high_generic_check); 659 + run_bottom_up(alloc_exact_nid_numa_no_overlap_high_generic_check); 660 + 661 + return 0; 662 + } 663 + 664 + static int alloc_exact_nid_numa_large_region_check(void) 665 + { 666 + test_print("\tRunning %s...\n", __func__); 667 + run_top_down(alloc_exact_nid_numa_large_region_generic_check); 668 + run_bottom_up(alloc_exact_nid_numa_large_region_generic_check); 669 + 670 + return 0; 671 + } 672 + 673 + static int alloc_exact_nid_numa_reserved_full_merge_check(void) 674 + { 675 + test_print("\tRunning %s...\n", __func__); 676 + run_top_down(alloc_exact_nid_numa_reserved_full_merge_generic_check); 677 + run_bottom_up(alloc_exact_nid_numa_reserved_full_merge_generic_check); 678 + 679 + return 0; 680 + } 681 + 682 + static int alloc_exact_nid_numa_split_all_reserved_check(void) 683 + { 684 + test_print("\tRunning %s...\n", __func__); 685 + 
run_top_down(alloc_exact_nid_numa_split_all_reserved_generic_check); 686 + run_bottom_up(alloc_exact_nid_numa_split_all_reserved_generic_check); 687 + 688 + return 0; 689 + } 690 + 1003 691 int __memblock_alloc_exact_nid_numa_checks(void) 1004 692 { 1005 693 test_print("Running %s NUMA tests...\n", FUNC_NAME); ··· 1081 625 alloc_exact_nid_numa_split_range_low_check(); 1082 626 alloc_exact_nid_numa_no_overlap_split_check(); 1083 627 alloc_exact_nid_numa_no_overlap_low_check(); 628 + 629 + alloc_exact_nid_numa_small_node_check(); 630 + alloc_exact_nid_numa_node_reserved_check(); 631 + alloc_exact_nid_numa_part_reserved_fail_check(); 632 + alloc_exact_nid_numa_split_range_high_check(); 633 + alloc_exact_nid_numa_no_overlap_high_check(); 634 + alloc_exact_nid_numa_large_region_check(); 635 + alloc_exact_nid_numa_reserved_full_merge_check(); 636 + alloc_exact_nid_numa_split_all_reserved_check(); 1084 637 1085 638 return 0; 1086 639 }