"Das U-Boot" Source Tree
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge patch series "lmb: use a single API for all allocations"

Sughosh Ganu <sughosh.ganu@linaro.org> says:

The LMB module has a bunch of APIs which are used for allocating
memory. There are a couple of APIs for requesting memory, and two
more for reserving regions of memory. Replace these different APIs
with a single one, lmb_alloc_mem(). The type of allocation to be made
is specified through one of the parameters to the function.

Additionally, the two APIs for reserving regions of memory,
lmb_reserve() and lmb_alloc_addr(), are the same with one
difference. One can reserve any memory region with lmb_reserve(),
while lmb_alloc_addr() actually checks that the memory region being
requested is part of the LMB memory map. Reserving memory that is not
part of the LMB memory map is pretty futile -- the allocation
functions do not allocate memory which has not been added to the LMB
memory map.

This series also removes the functionality allowing for reserving
memory regions outside the LMB memory map. Any request for reserving a
region of memory outside the LMB memory map now returns an -EINVAL
error.

Certain places in the common code using the LMB APIs were not
checking the return value of the functions. Checks have been added for
them. There are some calls being made from the architecture/platform
specific code which also do not check the return value. Those have been
kept the same, as I do not have the platforms with me to check if the
change causes any issues on them.

In addition, there is a patch which refactors code in
lmb_overlaps_region() and lmb_can_reserve_region() so that both
functionalities can be put in a single function, lmb_overlap_checks().

Finally, a new patch has been added which checks the return value of
the lmb allocation function before copying the device-tree to the
allocated address.

Link: https://lore.kernel.org/r/20250617104346.1379981-1-sughosh.ganu@linaro.org
[trini: Rework arch/arm/mach-snapdragon/board.c merge]
Signed-off-by: Tom Rini <trini@konsulko.com>

Tom Rini b40d7b8f 0862a8c4

+580 -282
+18 -9
arch/arm/mach-apple/board.c
··· 773 773 774 774 #define KERNEL_COMP_SIZE SZ_128M 775 775 776 + #define lmb_alloc(size, addr) lmb_alloc_mem(LMB_MEM_ALLOC_ANY, SZ_2M, addr, size, LMB_NONE) 777 + 776 778 int board_late_init(void) 777 779 { 778 780 u32 status = 0; 781 + phys_addr_t addr; 779 782 780 783 /* somewhat based on the Linux Kernel boot requirements: 781 784 * align by 2M and maximal FDT size 2M 782 785 */ 783 - status |= env_set_hex("loadaddr", lmb_alloc(SZ_1G, SZ_2M)); 784 - status |= env_set_hex("fdt_addr_r", lmb_alloc(SZ_2M, SZ_2M)); 785 - status |= env_set_hex("kernel_addr_r", lmb_alloc(SZ_128M, SZ_2M)); 786 - status |= env_set_hex("ramdisk_addr_r", lmb_alloc(SZ_1G, SZ_2M)); 787 - status |= env_set_hex("kernel_comp_addr_r", 788 - lmb_alloc(KERNEL_COMP_SIZE, SZ_2M)); 789 - status |= env_set_hex("kernel_comp_size", KERNEL_COMP_SIZE); 790 - status |= env_set_hex("scriptaddr", lmb_alloc(SZ_4M, SZ_2M)); 791 - status |= env_set_hex("pxefile_addr_r", lmb_alloc(SZ_4M, SZ_2M)); 786 + status |= !lmb_alloc(SZ_1G, &addr) ? env_set_hex("loadaddr", addr) : 1; 787 + status |= !lmb_alloc(SZ_2M, &addr) ? 788 + env_set_hex("fdt_addr_r", addr) : 1; 789 + status |= !lmb_alloc(SZ_128M, &addr) ? 790 + env_set_hex("kernel_addr_r", addr) : 1; 791 + status |= !lmb_alloc(SZ_1G, &addr) ? 792 + env_set_hex("ramdisk_addr_r", addr) : 1; 793 + status |= !lmb_alloc(KERNEL_COMP_SIZE, &addr) ? 794 + env_set_hex("kernel_comp_addr_r", addr) : 1; 795 + status |= !lmb_alloc(KERNEL_COMP_SIZE, &addr) ? 796 + env_set_hex("kernel_comp_size", addr) : 1; 797 + status |= !lmb_alloc(SZ_4M, &addr) ? 798 + env_set_hex("scriptaddr", addr) : 1; 799 + status |= !lmb_alloc(SZ_4M, &addr) ? 800 + env_set_hex("pxefile_addr_r", addr) : 1; 792 801 793 802 if (status) 794 803 log_warning("late_init: Failed to set run time variables\n");
+6 -2
arch/arm/mach-mediatek/tzcfg.c
··· 173 173 174 174 int arch_misc_init(void) 175 175 { 176 + phys_addr_t addr; 176 177 struct arm_smccc_res res; 177 178 178 179 /* ··· 180 181 * there's no need to check the result 181 182 */ 182 183 arm_smccc_smc(MTK_SIP_GET_BL31_REGION, 0, 0, 0, 0, 0, 0, 0, &res); 183 - lmb_reserve(res.a1, res.a2, LMB_NOMAP); 184 + addr = (phys_addr_t)res.a1; 185 + lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, res.a2, LMB_NOMAP); 184 186 185 187 arm_smccc_smc(MTK_SIP_GET_BL32_REGION, 0, 0, 0, 0, 0, 0, 0, &res); 188 + addr = (phys_addr_t)res.a1; 186 189 if (!res.a0 && res.a1 && res.a2) 187 - lmb_reserve(res.a1, res.a2, LMB_NOMAP); 190 + lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, res.a2, 191 + LMB_NOMAP); 188 192 189 193 #if IS_ENABLED(CONFIG_CMD_PSTORE) 190 194 char cmd[64];
+29 -16
arch/arm/mach-snapdragon/board.c
··· 517 517 #define FASTBOOT_BUF_SIZE 0 518 518 #endif 519 519 520 - #define addr_alloc(size) lmb_alloc(size, SZ_2M) 520 + #define lmb_alloc(size, addr) lmb_alloc_mem(LMB_MEM_ALLOC_ANY, SZ_2M, addr, size, LMB_NONE) 521 521 522 522 /* Stolen from arch/arm/mach-apple/board.c */ 523 523 int board_late_init(void) 524 524 { 525 - u32 status = 0; 525 + u32 status = 0, fdt_status = 0; 526 526 phys_addr_t addr; 527 527 struct fdt_header *fdt_blob = (struct fdt_header *)gd->fdt_blob; 528 528 529 529 /* We need to be fairly conservative here as we support boards with just 1G of TOTAL RAM */ 530 - addr = addr_alloc(SZ_128M); 530 + status |= !lmb_alloc(SZ_128M, &addr) ? 531 + env_set_hex("loadaddr", addr) : 1; 531 532 status |= env_set_hex("kernel_addr_r", addr); 532 - status |= env_set_hex("loadaddr", addr); 533 - status |= env_set_hex("ramdisk_addr_r", addr_alloc(SZ_128M)); 534 - status |= env_set_hex("kernel_comp_addr_r", addr_alloc(KERNEL_COMP_SIZE)); 535 - status |= env_set_hex("kernel_comp_size", KERNEL_COMP_SIZE); 533 + status |= !lmb_alloc(SZ_128M, &addr) ? 534 + env_set_hex("ramdisk_addr_r", addr) : 1; 535 + status |= !lmb_alloc(KERNEL_COMP_SIZE, &addr) ? 536 + env_set_hex("kernel_comp_addr_r", addr) : 1; 537 + status |= !lmb_alloc(KERNEL_COMP_SIZE, &addr) ? 538 + env_set_hex("kernel_comp_size", addr) : 1; 539 + status |= !lmb_alloc(SZ_4M, &addr) ? 540 + env_set_hex("scriptaddr", addr) : 1; 541 + status |= !lmb_alloc(SZ_4M, &addr) ? 542 + env_set_hex("pxefile_addr_r", addr) : 1; 543 + 536 544 if (IS_ENABLED(CONFIG_FASTBOOT)) { 537 - addr = addr_alloc(FASTBOOT_BUF_SIZE); 538 - status |= env_set_hex("fastboot_addr_r", addr); 545 + status |= !lmb_alloc(FASTBOOT_BUF_SIZE, &addr) ? 546 + env_set_hex("fastboot_addr_r", addr) : 1; 539 547 /* override loadaddr for memory rich soc */ 540 - status |= env_set_hex("loadaddr", addr); 548 + status |= !lmb_alloc(SZ_128M, &addr) ? 
549 + env_set_hex("loadaddr", addr) : 1; 541 550 } 542 - status |= env_set_hex("scriptaddr", addr_alloc(SZ_4M)); 543 - status |= env_set_hex("pxefile_addr_r", addr_alloc(SZ_4M)); 544 - addr = addr_alloc(SZ_2M); 545 - status |= env_set_hex("fdt_addr_r", addr); 546 551 547 - if (status) 552 + fdt_status |= !lmb_alloc(SZ_2M, &addr) ? 553 + env_set_hex("fdt_addr_r", addr) : 1; 554 + 555 + if (status || fdt_status) 548 556 log_warning("%s: Failed to set run time variables\n", __func__); 549 557 550 558 /* By default copy U-Boots FDT, it will be used as a fallback */ 551 - memcpy((void *)addr, (void *)gd->fdt_blob, fdt32_to_cpu(fdt_blob->totalsize)); 559 + if (fdt_status) 560 + log_warning("%s: Failed to reserve memory for copying FDT\n", 561 + __func__); 562 + else 563 + memcpy((void *)addr, (void *)gd->fdt_blob, 564 + fdt32_to_cpu(fdt_blob->totalsize)); 552 565 553 566 configure_env(); 554 567 qcom_late_init();
+2 -2
arch/powerpc/cpu/mpc85xx/mp.c
··· 410 410 411 411 void cpu_mp_lmb_reserve(void) 412 412 { 413 - u32 bootpg = determine_mp_bootpg(NULL); 413 + phys_addr_t bootpg = determine_mp_bootpg(NULL); 414 414 415 - lmb_reserve(bootpg, 4096, LMB_NONE); 415 + lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &bootpg, 4096, LMB_NONE); 416 416 } 417 417 418 418 void setup_mp(void)
+3 -2
arch/powerpc/lib/misc.c
··· 36 36 size = min(size, (ulong)CFG_SYS_LINUX_LOWMEM_MAX_SIZE); 37 37 38 38 if (size < bootm_size) { 39 - ulong base = bootmap_base + size; 39 + phys_addr_t base = bootmap_base + size; 40 40 41 41 printf("WARNING: adjusting available memory from 0x%lx to 0x%llx\n", 42 42 size, (unsigned long long)bootm_size); 43 - lmb_reserve(base, bootm_size - size, LMB_NONE); 43 + lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &base, 44 + bootm_size - size, LMB_NONE); 44 45 } 45 46 46 47 #ifdef CONFIG_MP
+20 -7
boot/bootm.c
··· 623 623 */ 624 624 if (os.type == IH_TYPE_KERNEL_NOLOAD && os.comp != IH_COMP_NONE) { 625 625 ulong req_size = ALIGN(image_len * 4, SZ_1M); 626 + phys_addr_t addr; 626 627 627 - load = lmb_alloc(req_size, SZ_2M); 628 - if (!load) 628 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ANY, SZ_2M, &addr, 629 + req_size, LMB_NONE); 630 + if (err) 629 631 return 1; 630 - os.load = load; 631 - images->ep = load; 632 + 633 + load = (ulong)addr; 634 + os.load = (ulong)addr; 635 + images->ep = (ulong)addr; 632 636 debug("Allocated %lx bytes at %lx for kernel (size %lx) decompression\n", 633 637 req_size, load, image_len); 634 638 } ··· 698 702 images->os.end = relocated_addr + image_size; 699 703 } 700 704 701 - if (CONFIG_IS_ENABLED(LMB)) 702 - lmb_reserve(images->os.load, (load_end - images->os.load), 703 - LMB_NONE); 705 + if (CONFIG_IS_ENABLED(LMB)) { 706 + phys_addr_t load; 707 + 708 + load = (phys_addr_t)images->os.load; 709 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &load, 710 + (load_end - images->os.load), LMB_NONE); 711 + if (err) { 712 + log_err("Unable to allocate memory %#lx for loading OS\n", 713 + images->os.load); 714 + return 1; 715 + } 716 + } 704 717 705 718 return 0; 706 719 }
+34 -22
boot/image-board.c
··· 16 16 #include <fpga.h> 17 17 #include <image.h> 18 18 #include <init.h> 19 + #include <lmb.h> 19 20 #include <log.h> 20 21 #include <mapmem.h> 21 22 #include <rtc.h> ··· 538 539 int boot_ramdisk_high(ulong rd_data, ulong rd_len, ulong *initrd_start, 539 540 ulong *initrd_end) 540 541 { 542 + int err; 541 543 char *s; 542 544 phys_addr_t initrd_high; 543 545 int initrd_copy_to_ram = 1; ··· 559 561 560 562 if (rd_data) { 561 563 if (!initrd_copy_to_ram) { /* zero-copy ramdisk support */ 564 + phys_addr_t initrd_addr; 565 + 562 566 debug(" in-place initrd\n"); 563 567 *initrd_start = rd_data; 564 568 *initrd_end = rd_data + rd_len; 565 - lmb_reserve(rd_data, rd_len, LMB_NONE); 569 + initrd_addr = (phys_addr_t)rd_data; 570 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &initrd_addr, 571 + rd_len, LMB_NONE); 572 + if (err) { 573 + puts("in-place initrd alloc failed\n"); 574 + goto error; 575 + } 566 576 } else { 567 - if (initrd_high) 568 - *initrd_start = 569 - (ulong)lmb_alloc_base(rd_len, 570 - 0x1000, 571 - initrd_high, 572 - LMB_NONE); 573 - else 574 - *initrd_start = (ulong)lmb_alloc(rd_len, 575 - 0x1000); 577 + enum lmb_mem_type type = initrd_high ? 
578 + LMB_MEM_ALLOC_MAX : LMB_MEM_ALLOC_ANY; 576 579 577 - if (*initrd_start == 0) { 580 + err = lmb_alloc_mem(type, 0x1000, &initrd_high, rd_len, 581 + LMB_NONE); 582 + if (err) { 578 583 puts("ramdisk - allocation error\n"); 579 584 goto error; 580 585 } 586 + 587 + *initrd_start = (ulong)initrd_high; 581 588 bootstage_mark(BOOTSTAGE_ID_COPY_RAMDISK); 582 589 583 590 *initrd_end = *initrd_start + rd_len; ··· 828 835 */ 829 836 int boot_get_cmdline(ulong *cmd_start, ulong *cmd_end) 830 837 { 831 - int barg; 838 + int barg, err; 832 839 char *cmdline; 833 840 char *s; 841 + phys_addr_t addr; 834 842 835 843 /* 836 844 * Help the compiler detect that this function is only called when ··· 840 848 return 0; 841 849 842 850 barg = IF_ENABLED_INT(CONFIG_SYS_BOOT_GET_CMDLINE, CONFIG_SYS_BARGSIZE); 843 - cmdline = (char *)(ulong)lmb_alloc_base(barg, 0xf, 844 - env_get_bootm_mapsize() + env_get_bootm_low(), 845 - LMB_NONE); 846 - if (!cmdline) 851 + addr = env_get_bootm_mapsize() + env_get_bootm_low(); 852 + 853 + err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, 0xf, &addr, barg, LMB_NONE); 854 + if (err) 847 855 return -1; 856 + 857 + cmdline = (char *)(uintptr_t)addr; 848 858 849 859 s = env_get("bootargs"); 850 860 if (!s) ··· 874 884 */ 875 885 int boot_get_kbd(struct bd_info **kbd) 876 886 { 877 - *kbd = (struct bd_info *)(ulong)lmb_alloc_base(sizeof(struct bd_info), 878 - 0xf, 879 - env_get_bootm_mapsize() + 880 - env_get_bootm_low(), 881 - LMB_NONE); 882 - if (!*kbd) 887 + int err; 888 + phys_addr_t addr; 889 + 890 + addr = env_get_bootm_mapsize() + env_get_bootm_low(); 891 + err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, 0xf, &addr, 892 + sizeof(struct bd_info), LMB_NONE); 893 + if (err) 883 894 return -1; 884 895 896 + *kbd = (struct bd_info *)(uintptr_t)addr; 885 897 **kbd = *gd->bd; 886 898 887 899 debug("## kernel board info at 0x%08lx\n", (ulong)*kbd);
+47 -22
boot/image-fdt.c
··· 72 72 static void boot_fdt_reserve_region(u64 addr, u64 size, u32 flags) 73 73 { 74 74 long ret; 75 + phys_addr_t rsv_addr; 75 76 76 - ret = lmb_reserve(addr, size, flags); 77 + rsv_addr = (phys_addr_t)addr; 78 + ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &rsv_addr, size, flags); 77 79 if (!ret) { 78 80 debug(" reserving fdt memory region: addr=%llx size=%llx flags=%x\n", 79 81 (unsigned long long)addr, 80 82 (unsigned long long)size, flags); 81 - } else if (ret != -EEXIST) { 83 + } else if (ret != -EEXIST && ret != -EINVAL) { 82 84 puts("ERROR: reserving fdt memory region failed "); 83 85 printf("(addr=%llx size=%llx flags=%x)\n", 84 86 (unsigned long long)addr, ··· 155 157 */ 156 158 int boot_relocate_fdt(char **of_flat_tree, ulong *of_size) 157 159 { 158 - u64 start, size, usable, addr, low, mapsize; 160 + u64 start, size, usable, low, mapsize; 159 161 void *fdt_blob = *of_flat_tree; 160 162 void *of_start = NULL; 161 163 char *fdt_high; ··· 163 165 int bank; 164 166 int err; 165 167 int disable_relocation = 0; 168 + phys_addr_t addr; 166 169 167 170 /* nothing to do */ 168 171 if (*of_size == 0) ··· 180 183 /* If fdt_high is set use it to select the relocation address */ 181 184 fdt_high = env_get("fdt_high"); 182 185 if (fdt_high) { 183 - ulong desired_addr = hextoul(fdt_high, NULL); 186 + ulong high_addr = hextoul(fdt_high, NULL); 184 187 185 - if (desired_addr == ~0UL) { 188 + if (high_addr == ~0UL) { 186 189 /* All ones means use fdt in place */ 187 190 of_start = fdt_blob; 188 - lmb_reserve(map_to_sysmem(of_start), of_len, LMB_NONE); 189 - disable_relocation = 1; 190 - } else if (desired_addr) { 191 - addr = lmb_alloc_base(of_len, 0x1000, desired_addr, 192 - LMB_NONE); 193 - of_start = map_sysmem(addr, of_len); 194 - if (of_start == NULL) { 195 - puts("Failed using fdt_high value for Device Tree"); 191 + addr = map_to_sysmem(fdt_blob); 192 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, 193 + of_len, LMB_NONE); 194 + if (err) { 195 + printf("Failed 
to reserve memory for fdt at %#llx\n", 196 + (u64)addr); 196 197 goto error; 197 198 } 199 + 200 + disable_relocation = 1; 198 201 } else { 199 - addr = lmb_alloc(of_len, 0x1000); 202 + enum lmb_mem_type type = high_addr ? 203 + LMB_MEM_ALLOC_MAX : LMB_MEM_ALLOC_ANY; 204 + 205 + addr = high_addr; 206 + err = lmb_alloc_mem(type, 0x1000, &addr, of_len, 207 + LMB_NONE); 208 + if (err) { 209 + puts("Failed to allocate memory for Device Tree relocation\n"); 210 + goto error; 211 + } 200 212 of_start = map_sysmem(addr, of_len); 201 213 } 202 214 } else { ··· 218 230 * for LMB allocation. 219 231 */ 220 232 usable = min(start + size, low + mapsize); 221 - addr = lmb_alloc_base(of_len, 0x1000, usable, LMB_NONE); 222 - of_start = map_sysmem(addr, of_len); 223 - /* Allocation succeeded, use this block. */ 224 - if (of_start != NULL) 225 - break; 233 + addr = usable; 234 + err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, 0x1000, 235 + &addr, of_len, LMB_NONE); 236 + if (!err) { 237 + of_start = map_sysmem(addr, of_len); 238 + /* Allocation succeeded, use this block. 
*/ 239 + if (of_start) 240 + break; 241 + } 226 242 227 243 /* 228 244 * Reduce the mapping size in the next bank ··· 674 690 675 691 /* Delete the old LMB reservation */ 676 692 if (CONFIG_IS_ENABLED(LMB) && lmb) 677 - lmb_free(map_to_sysmem(blob), fdt_totalsize(blob)); 693 + lmb_free(map_to_sysmem(blob), fdt_totalsize(blob), LMB_NONE); 678 694 679 695 ret = fdt_shrink_to_minimum(blob, 0); 680 696 if (ret < 0) ··· 682 698 of_size = ret; 683 699 684 700 /* Create a new LMB reservation */ 685 - if (CONFIG_IS_ENABLED(LMB) && lmb) 686 - lmb_reserve(map_to_sysmem(blob), of_size, LMB_NONE); 701 + if (CONFIG_IS_ENABLED(LMB) && lmb) { 702 + phys_addr_t fdt_addr; 703 + 704 + fdt_addr = map_to_sysmem(blob); 705 + ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &fdt_addr, 706 + of_size, LMB_NONE); 707 + if (ret) { 708 + printf("Failed to reserve memory for the fdt at %#llx\n", 709 + (u64)fdt_addr); 710 + } 711 + } 687 712 688 713 #if defined(CONFIG_ARCH_KEYSTONE) 689 714 if (IS_ENABLED(CONFIG_OF_BOARD_SETUP))
+9 -1
cmd/booti.c
··· 30 30 uint8_t *temp; 31 31 ulong dest; 32 32 ulong dest_end; 33 + phys_addr_t ep_addr; 33 34 unsigned long comp_len; 34 35 unsigned long decomp_len; 35 36 int ctype; ··· 88 89 images->os.start = relocated_addr; 89 90 images->os.end = relocated_addr + image_size; 90 91 91 - lmb_reserve(images->ep, le32_to_cpu(image_size), LMB_NONE); 92 + ep_addr = (phys_addr_t)images->ep; 93 + ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &ep_addr, 94 + le32_to_cpu(image_size), LMB_NONE); 95 + if (ret) { 96 + printf("Failed to allocate memory for the image at %#llx\n", 97 + (unsigned long long)images->ep); 98 + return 1; 99 + } 92 100 93 101 /* 94 102 * Handle the BOOTM_STATE_FINDOTHER state ourselves as we do not
+9 -1
cmd/bootz.c
··· 28 28 { 29 29 ulong zi_start, zi_end; 30 30 struct bootm_info bmi; 31 + phys_addr_t ep_addr; 31 32 int ret; 32 33 33 34 bootm_init(&bmi); ··· 56 57 if (ret != 0) 57 58 return 1; 58 59 59 - lmb_reserve(images->ep, zi_end - zi_start, LMB_NONE); 60 + ep_addr = (phys_addr_t)images->ep; 61 + ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &ep_addr, zi_end - zi_start, 62 + LMB_NONE); 63 + if (ret) { 64 + printf("Failed to allocate memory for the image at %#llx\n", 65 + (unsigned long long)images->ep); 66 + return 1; 67 + } 60 68 61 69 /* 62 70 * Handle the BOOTM_STATE_FINDOTHER state ourselves as we do not
+6 -3
cmd/load.c
··· 178 178 #endif 179 179 { 180 180 void *dst; 181 + phys_addr_t dst_addr; 181 182 182 - ret = lmb_reserve(store_addr, binlen, LMB_NONE); 183 + dst_addr = (phys_addr_t)store_addr; 184 + ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &dst_addr, 185 + binlen, LMB_NONE); 183 186 if (ret) { 184 187 printf("\nCannot overwrite reserved area (%08lx..%08lx)\n", 185 188 store_addr, store_addr + binlen); 186 189 return ret; 187 190 } 188 - dst = map_sysmem(store_addr, binlen); 191 + dst = map_sysmem(dst_addr, binlen); 189 192 memcpy(dst, binbuf, binlen); 190 193 unmap_sysmem(dst); 191 - lmb_free(store_addr, binlen); 194 + lmb_free(dst_addr, binlen, LMB_NONE); 192 195 } 193 196 if ((store_addr) < start_addr) 194 197 start_addr = store_addr;
-1
doc/api/index.rst
··· 17 17 interrupt 18 18 led 19 19 linker_lists 20 - lmb 21 20 logging 22 21 nvmem 23 22 part
-7
doc/api/lmb.rst
··· 1 - .. SPDX-License-Identifier: GPL-2.0+ 2 - 3 - Logical memory blocks 4 - ===================== 5 - 6 - .. kernel-doc:: include/lmb.h 7 - :internal:
+1
doc/develop/index.rst
··· 46 46 cedit 47 47 event 48 48 global_data 49 + lmb 49 50 logging 50 51 makefiles 51 52 menus
+166
doc/develop/lmb.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0+ 2 + 3 + Logical Memory Blocks (LMB) 4 + =========================== 5 + 6 + U-Boot has support for reserving chunks of memory which is primarily 7 + used for loading images to the DRAM memory, before these are booted, 8 + or written to non-volatile storage medium. This functionality is 9 + provided through the Logical Memory Blocks (LMB) module. 10 + 11 + Introduction 12 + ------------ 13 + 14 + The LMB module manages allocation requests for memory region not 15 + occupied by the U-Boot image. Allocation requests that are made 16 + through malloc() and similar functions result in memory getting 17 + allocated from the heap region, which is part of the U-Boot 18 + image. Typically, the heap memory is a few MiB in size. Loading an 19 + image like the linux kernel might require lot more memory than what 20 + the heap can provide. Such allocations are usually handled through the 21 + LMB module. 22 + 23 + The U-Boot image typically gets relocated to the top of the usable 24 + DRAM memory region. A typical memory layout looks as follows:: 25 + 26 + 27 + 28 + 29 + 30 + | | 31 + | | 32 + | | 33 + | | 34 + | | 35 + --- +--------------+ <--- U-Boot ram top 36 + | | | 37 + | | Text | 38 + | +--------------+ 39 + | | | 40 + | | Data | 41 + | +--------------+ 42 + | | | 43 + | | BSS | 44 + U-Boot Image +--------------+ 45 + | | | 46 + | | Heap | 47 + | | | 48 + | +--------------+ 49 + | | | 50 + | | | 51 + | | Stack | 52 + | | | 53 + | | | 54 + --- +--------------+ 55 + | | 56 + | | 57 + | | 58 + | | 59 + | | 60 + | | 61 + | | 62 + | | 63 + | | 64 + | | 65 + | | 66 + | | 67 + | | 68 + | | 69 + +--------------+ <--- ram start 70 + 71 + 72 + 73 + The region of memory below the U-Boot image is the one controlled by 74 + the LMB module. 75 + 76 + 77 + Types of LMB Allocations 78 + ------------------------ 79 + 80 + There are two classes of allocation requests that get made to the LMB 81 + module. 
One type of allocation requests are requesting memory of a 82 + particular number of bytes. This type of allocation is similar to that 83 + done using the malloc type of function calls. The other type of 84 + allocations, are requests made for a specific memory address. The 85 + second type of allocations are usually made for loading images to a 86 + particular memory address. 87 + 88 + 89 + LMB design Pre 2025.01 90 + ---------------------- 91 + 92 + The earlier versions of U-Boot (pre 2025.01 release) 93 + had a local memory map based LMB implementation whereby it was 94 + possible to declare the LMB map inside a function or a C file. This 95 + design resulted in temporary, non-global LMB maps, which also allowed 96 + for re-use of memory. This meant that it was possible to use a region 97 + of memory to load some image, and subsequently the same region of 98 + memory could be used for loading a different image. A typical example 99 + of this usage would be loading an image to a memory address, followed 100 + by writing that image to some non-volatile storage medium. Once this 101 + is done, the same address can be used for loading a different image 102 + and then writing it to it's non-volatile storage 103 + destination. Typically, environment variables like `loadaddr`, 104 + `kernel_addr_r`, `ramdisk_addr_r` are used for loading images to 105 + memory regions. 106 + 107 + 108 + Current LMB implementation 109 + -------------------------- 110 + 111 + Changes were made in the 2025.01 release to make the LMB memory map 112 + global and persistent. With this, the LMB memory map is the same 113 + across all of U-Boot, and also persists as long as U-Boot is 114 + active. Even with this change, there has been consistency as far as 115 + re-use of memory is concerned to maintain backward compatibility. It 116 + is allowed for re-requesting the same region of memory if the memory 117 + region has a particular attribute (LMB_NONE). 
118 + 119 + As part of the platform boot, DRAM memory available for use in U-Boot 120 + gets added to the LMB memory map. Any allocation requests made 121 + subsequently will be made from this memory added as part of the board 122 + init. 123 + 124 + 125 + Allocation API 126 + -------------- 127 + 128 + Any request for non-heap memory can be made through the LMB allocation 129 + API. 130 + 131 + .. code-block:: c 132 + 133 + int lmb_alloc_mem(enum lmb_mem_type type, u64 align, 134 + phys_addr_t *addr, phys_size_t size, 135 + u32 flags); 136 + 137 + Correspondingly, the allocated memory can be free'd 138 + 139 + .. code-block:: c 140 + 141 + long lmb_free(phys_addr_t base, phys_size_t size, u32 flags); 142 + 143 + For a detailed API description, please refer to the header file. 144 + 145 + 146 + UEFI allocations with LMB as the backend 147 + ---------------------------------------- 148 + 149 + The UEFI specification describes boot-time API's for allocation of 150 + memory. These API's use the same memory that is being used by the LMB 151 + module. Pre 2025.01 release, there wasn't any synchronisation between 152 + the EFI sub-system and the LMB module about the memory that was 153 + getting allocated by each of these modules. This was the primary 154 + reason for making the LMB memory map global and persistent. With this 155 + change, the EFI memory allocation API's have also been changed to use 156 + the LMB module as the backend for the allocation requests. Any other 157 + sub-system which might wish to use the same memory region for it's use 158 + can then use the LMB as the backend for the memory allocations and 159 + it's associated book-keeping. 160 + 161 + 162 + API documentation 163 + ----------------- 164 + 165 + .. kernel-doc:: include/lmb.h 166 +
+4 -1
fs/fs.c
··· 580 580 int ret; 581 581 loff_t size; 582 582 loff_t read_len; 583 + phys_addr_t read_addr; 583 584 584 585 /* get the actual size of the file */ 585 586 ret = info->size(filename, &size); ··· 597 598 598 599 lmb_dump_all(); 599 600 600 - if (!lmb_alloc_addr(addr, read_len, LMB_NONE)) 601 + read_addr = (phys_addr_t)addr; 602 + if (!lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &read_addr, read_len, 603 + LMB_NONE)) 601 604 return 0; 602 605 603 606 log_err("** Reading file would overwrite reserved memory **\n");
+46 -59
include/lmb.h
··· 32 32 #define LMB_NONOTIFY BIT(3) 33 33 34 34 /** 35 + * enum lmb_mem_type - type of memory allocation request 36 + * @LMB_MEM_ALLOC_ADDR: request for a particular region of memory 37 + * @LMB_MEM_ALLOC_ANY: allocate any available memory region 38 + * @LMB_MEM_ALLOC_MAX: allocate memory below a particular address 39 + */ 40 + enum lmb_mem_type { 41 + LMB_MEM_ALLOC_ADDR = 1, 42 + LMB_MEM_ALLOC_ANY, 43 + LMB_MEM_ALLOC_MAX, 44 + }; 45 + 46 + /** 35 47 * enum lmb_map_op - memory map operation 36 48 */ 37 49 enum lmb_map_op { ··· 68 80 }; 69 81 70 82 /** 83 + * lmb_alloc_mem() - Request LMB memory 84 + * @type: Type of memory allocation request 85 + * @align: Alignment of the memory region requested(0 for none) 86 + * @addr: Base address of the allocated memory region 87 + * @size: Size in bytes of the allocation request 88 + * @flags: Memory region attributes to be set 89 + * 90 + * Allocate a region of memory where the allocation is based on the parameters 91 + * that have been passed to the function.The first parameter specifies the 92 + * type of allocation that is being requested. The second parameter, @align 93 + * is used to specify if the allocation is to be made with a particular 94 + * alignment. Use 0 for no alignment requirements. 95 + * 96 + * The allocated address is returned through the @addr parameter when @type 97 + * is @LMB_MEM_ALLOC_ANY or @LMB_MEM_ALLOC_MAX. If @type is 98 + * @LMB_MEM_ALLOC_ADDR the @addr parameter would contain the address being 99 + * requested. 100 + * 101 + * The flags parameter is used to specify the memory attributes of the 102 + * requested region. 103 + * 104 + * Return: 0 on success, -ve value on failure 105 + * 106 + * When the allocation is of type @LMB_MEM_ALLOC_ADDR, the return value can 107 + * be -EINVAL if the requested memory region is not part of the LMB memory 108 + * map, and -EEXIST if the requested region is already allocated. 
109 + */ 110 + int lmb_alloc_mem(enum lmb_mem_type type, u64 align, phys_addr_t *addr, 111 + phys_size_t size, u32 flags); 112 + 113 + /** 71 114 * lmb_init() - Initialise the LMB module. 72 115 * 73 116 * Return: 0 on success, negative error code on failure. ··· 81 124 */ 82 125 int lmb_init(void); 83 126 84 - /** 85 - * lmb_add_memory() - Add memory range for LMB allocations. 86 - * 87 - * Add the entire available memory range to the pool of memory that 88 - * can be used by the LMB module for allocations. 89 - */ 90 - void lmb_add_memory(void); 91 - 92 127 long lmb_add(phys_addr_t base, phys_size_t size); 93 128 94 - /** 95 - * lmb_reserve() - Reserve one region with a specific flags bitfield 96 - * @base: Base address of the memory region 97 - * @size: Size of the memory region 98 - * @flags: Flags for the memory region 99 - * 100 - * Return: 101 - * * %0 - Added successfully, or it's already added (only if LMB_NONE) 102 - * * %-EEXIST - The region is already added, and flags != LMB_NONE 103 - * * %-1 - Failure 104 - */ 105 - long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags); 106 - 107 - phys_addr_t lmb_alloc(phys_size_t size, ulong align); 108 129 phys_size_t lmb_get_free_size(phys_addr_t addr); 109 130 110 131 /** 111 - * lmb_alloc_base() - Allocate specified memory region with specified 112 - * attributes 113 - * @size: Size of the region requested 114 - * @align: Alignment of the memory region requested 115 - * @max_addr: Maximum address of the requested region 116 - * @flags: Memory region attributes to be set 117 - * 118 - * Allocate a region of memory with the attributes specified through the 119 - * parameter. The max_addr parameter is used to specify the maximum address 120 - * below which the requested region should be allocated. 121 - * 122 - * Return: Base address on success, 0 on error. 
123 - */ 124 - phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr, 125 - uint flags); 126 - 127 - /** 128 - * lmb_alloc_addr() - Allocate specified memory address with specified attributes 129 - * 130 - * @base: Base Address requested 131 - * @size: Size of the region requested 132 - * @flags: Memory region attributes to be set 133 - * 134 - * Allocate a region of memory with the attributes specified through the 135 - * parameter. The base parameter is used to specify the base address 136 - * of the requested region. 137 - * 138 - * Return: 0 on success -1 on error 139 - */ 140 - int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags); 141 - 142 - /** 143 132 * lmb_is_reserved_flags() - Test if address is in reserved region with flag 144 133 * bits set 145 134 * @addr: Address to be tested ··· 153 142 int lmb_is_reserved_flags(phys_addr_t addr, int flags); 154 143 155 144 /** 156 - * lmb_free_flags() - Free up a region of memory 145 + * lmb_free() - Free up a region of memory 157 146 * @base: Base Address of region to be freed 158 147 * @size: Size of the region to be freed 159 148 * @flags: Memory region attributes 160 149 * 161 150 * Return: 0 on success, negative error code on failure. 162 151 */ 163 - long lmb_free_flags(phys_addr_t base, phys_size_t size, uint flags); 164 - 165 - long lmb_free(phys_addr_t base, phys_size_t size); 152 + long lmb_free(phys_addr_t base, phys_size_t size, u32 flags); 166 153 167 154 void lmb_dump_all(void); 168 155 void lmb_dump_all_force(void); ··· 175 162 176 163 static inline int lmb_read_check(phys_addr_t addr, phys_size_t len) 177 164 { 178 - return lmb_alloc_addr(addr, len, LMB_NONE); 165 + return lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, len, LMB_NONE); 179 166 } 180 167 181 168 /**
+12 -10
lib/efi_loader/efi_memory.c
··· 454 454 enum efi_memory_type memory_type, 455 455 efi_uintn_t pages, uint64_t *memory) 456 456 { 457 + int err; 457 458 u64 efi_addr, len; 458 459 uint flags; 459 460 efi_status_t ret; ··· 475 476 switch (type) { 476 477 case EFI_ALLOCATE_ANY_PAGES: 477 478 /* Any page */ 478 - addr = (u64)lmb_alloc_base(len, EFI_PAGE_SIZE, 479 - LMB_ALLOC_ANYWHERE, flags); 480 - if (!addr) 479 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ANY, EFI_PAGE_SIZE, &addr, 480 + len, flags); 481 + if (err) 481 482 return EFI_OUT_OF_RESOURCES; 482 483 break; 483 484 case EFI_ALLOCATE_MAX_ADDRESS: 484 485 /* Max address */ 485 486 addr = map_to_sysmem((void *)(uintptr_t)*memory); 486 - addr = (u64)lmb_alloc_base(len, EFI_PAGE_SIZE, addr, 487 - flags); 488 - if (!addr) 487 + 488 + err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, EFI_PAGE_SIZE, &addr, 489 + len, flags); 490 + if (err) 489 491 return EFI_OUT_OF_RESOURCES; 490 492 break; 491 493 case EFI_ALLOCATE_ADDRESS: ··· 493 495 return EFI_NOT_FOUND; 494 496 495 497 addr = map_to_sysmem((void *)(uintptr_t)*memory); 496 - if (lmb_alloc_addr(addr, len, flags)) 498 + if (lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, len, flags)) 497 499 return EFI_NOT_FOUND; 498 500 break; 499 501 default: ··· 506 508 ret = efi_update_memory_map(efi_addr, pages, memory_type, true, false); 507 509 if (ret != EFI_SUCCESS) { 508 510 /* Map would overlap, bail out */ 509 - lmb_free_flags(addr, (u64)pages << EFI_PAGE_SHIFT, flags); 511 + lmb_free(addr, (u64)pages << EFI_PAGE_SHIFT, flags); 510 512 unmap_sysmem((void *)(uintptr_t)efi_addr); 511 513 return EFI_OUT_OF_RESOURCES; 512 514 } ··· 546 548 * been mapped with map_sysmem() from efi_allocate_pages(). Convert 547 549 * it back to an address LMB understands 548 550 */ 549 - status = lmb_free_flags(map_to_sysmem((void *)(uintptr_t)memory), len, 550 - LMB_NOOVERWRITE); 551 + status = lmb_free(map_to_sysmem((void *)(uintptr_t)memory), len, 552 + LMB_NOOVERWRITE); 551 553 if (status) 552 554 return EFI_NOT_FOUND; 553 555
+97 -86
lib/lmb.c
··· 317 317 rgn[i].flags); 318 318 } 319 319 320 - static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base, 321 - phys_size_t size) 320 + /** 321 + * lmb_overlap_checks() - perform checks to see if region can be allocated or reserved 322 + * @lmb_rgn_lst: list of LMB regions 323 + * @base: base address of region to be checked 324 + * @size: size of region to be checked 325 + * @flags: flag of the region to be checked (only for reservation requests) 326 + * @alloc: if checks are to be done for allocation or reservation request 327 + * 328 + * Check if the region passed to the function overlaps with any one of 329 + * the regions of the passed lmb region list. 330 + * 331 + * If the @alloc flag is set to true, this check stops as soon as an 332 + * overlapping region is found. The function can also be called to 333 + * check if a reservation request can be satisfied, by setting 334 + * @alloc to false. In that case, the function then iterates through 335 + * all the regions in the list passed to ensure that the requested 336 + * region does not overlap with any existing regions. An overlap is 337 + * allowed only when the flag of the requested region and the existing 338 + * region is LMB_NONE. 339 + * 340 + * Return: index of the overlapping region, -1 if no overlap is found 341 + * 342 + * When the function is called for a reservation request check, -1 will 343 + * also be returned when there is an allowed overlap, i.e. requested 344 + * region and existing regions have flags as LMB_NONE. 
345 + */ 346 + static long lmb_overlap_checks(struct alist *lmb_rgn_lst, phys_addr_t base, 347 + phys_size_t size, u32 flags, bool alloc) 322 348 { 323 349 unsigned long i; 324 350 struct lmb_region *rgn = lmb_rgn_lst->data; ··· 326 352 for (i = 0; i < lmb_rgn_lst->count; i++) { 327 353 phys_addr_t rgnbase = rgn[i].base; 328 354 phys_size_t rgnsize = rgn[i].size; 355 + u32 rgnflags = rgn[i].flags; 329 356 330 - if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) 331 - break; 357 + if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { 358 + if (alloc || flags != LMB_NONE || flags != rgnflags) 359 + break; 360 + } 332 361 } 333 362 334 363 return (i < lmb_rgn_lst->count) ? i : -1; ··· 390 419 base = ALIGN_DOWN(lmbbase + lmbsize - size, align); 391 420 392 421 while (base && lmbbase <= base) { 393 - rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size); 422 + rgn = lmb_overlap_checks(&io_lmb->used_mem, base, size, 423 + LMB_NOOVERWRITE, true); 394 424 if (rgn < 0) { 395 425 /* This area isn't reserved, take it */ 396 426 if (lmb_add_region_flags(&io_lmb->used_mem, base, ··· 488 518 #endif 489 519 } 490 520 521 + static long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags) 522 + { 523 + long ret = 0; 524 + struct alist *lmb_rgn_lst = &lmb.used_mem; 525 + 526 + if (lmb_overlap_checks(lmb_rgn_lst, base, size, flags, false) != -1) 527 + return -EEXIST; 528 + 529 + ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags); 530 + if (ret) 531 + return ret; 532 + 533 + return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags); 534 + } 535 + 491 536 static void lmb_reserve_uboot_region(void) 492 537 { 493 538 int bank; ··· 557 602 } 558 603 } 559 604 560 - /** 561 - * lmb_can_reserve_region() - check if the region can be reserved 562 - * @base: base address of region to be reserved 563 - * @size: size of region to be reserved 564 - * @flags: flag of the region to be reserved 565 - * 566 - * Go through all the reserved regions and ensure that the 
requested 567 - * region does not overlap with any existing regions. An overlap is 568 - * allowed only when the flag of the request region and the existing 569 - * region is LMB_NONE. 570 - * 571 - * Return: true if region can be reserved, false otherwise 572 - */ 573 - static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size, 574 - u32 flags) 575 - { 576 - uint i; 577 - struct lmb_region *lmb_reserved = lmb.used_mem.data; 578 - 579 - for (i = 0; i < lmb.used_mem.count; i++) { 580 - u32 rgnflags = lmb_reserved[i].flags; 581 - phys_addr_t rgnbase = lmb_reserved[i].base; 582 - phys_size_t rgnsize = lmb_reserved[i].size; 583 - 584 - if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { 585 - if (flags != LMB_NONE || flags != rgnflags) 586 - return false; 587 - } 588 - } 589 - 590 - return true; 591 - } 592 - 593 - void lmb_add_memory(void) 605 + static void lmb_add_memory(void) 594 606 { 595 607 int i; 596 608 phys_addr_t bank_end; ··· 640 652 return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE); 641 653 } 642 654 643 - long lmb_free_flags(phys_addr_t base, phys_size_t size, 644 - uint flags) 655 + long lmb_free(phys_addr_t base, phys_size_t size, u32 flags) 645 656 { 646 657 long ret; 647 658 ··· 652 663 return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags); 653 664 } 654 665 655 - long lmb_free(phys_addr_t base, phys_size_t size) 656 - { 657 - return lmb_free_flags(base, size, LMB_NONE); 658 - } 659 - 660 - long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags) 661 - { 662 - long ret = 0; 663 - struct alist *lmb_rgn_lst = &lmb.used_mem; 664 - 665 - if (!lmb_can_reserve_region(base, size, flags)) 666 - return -EEXIST; 667 - 668 - ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags); 669 - if (ret) 670 - return ret; 671 - 672 - return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags); 673 - } 674 - 675 - static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align, 676 - phys_addr_t max_addr, u32 
flags) 666 + static int _lmb_alloc_base(phys_size_t size, ulong align, 667 + phys_addr_t *addr, u32 flags) 677 668 { 678 669 int ret; 679 670 long i, rgn; 671 + phys_addr_t max_addr; 680 672 phys_addr_t base = 0; 681 673 phys_addr_t res_base; 682 674 struct lmb_region *lmb_used = lmb.used_mem.data; 683 675 struct lmb_region *lmb_memory = lmb.available_mem.data; 684 676 677 + max_addr = *addr; 685 678 for (i = lmb.available_mem.count - 1; i >= 0; i--) { 686 679 phys_addr_t lmbbase = lmb_memory[i].base; 687 680 phys_size_t lmbsize = lmb_memory[i].size; ··· 702 695 } 703 696 704 697 while (base && lmbbase <= base) { 705 - rgn = lmb_overlaps_region(&lmb.used_mem, base, size); 698 + rgn = lmb_overlap_checks(&lmb.used_mem, base, size, 699 + LMB_NOOVERWRITE, true); 706 700 if (rgn < 0) { 707 701 /* This area isn't reserved, take it */ 708 702 if (lmb_add_region_flags(&lmb.used_mem, base, ··· 714 708 flags); 715 709 if (ret) 716 710 return ret; 717 - 718 - return base; 711 + *addr = base; 712 + return 0; 719 713 } 720 714 721 715 res_base = lmb_used[rgn].base; ··· 728 722 log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n", 729 723 __func__, (ulong)size, (ulong)max_addr); 730 724 731 - return 0; 725 + return -1; 732 726 } 733 727 734 - phys_addr_t lmb_alloc(phys_size_t size, ulong align) 735 - { 736 - return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE); 737 - } 738 - 739 - phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr, 740 - uint flags) 741 - { 742 - return _lmb_alloc_base(size, align, max_addr, flags); 743 - } 744 - 745 - int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags) 728 + static int _lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags) 746 729 { 747 730 long rgn; 748 731 struct lmb_region *lmb_memory = lmb.available_mem.data; 749 732 750 733 /* Check if the requested address is in one of the memory regions */ 751 - rgn = lmb_overlaps_region(&lmb.available_mem, base, size); 734 + rgn = 
lmb_overlap_checks(&lmb.available_mem, base, size, 735 + LMB_NOOVERWRITE, true); 752 736 if (rgn >= 0) { 753 737 /* 754 738 * Check if the requested end address is in the same memory ··· 756 740 */ 757 741 if (lmb_addrs_overlap(lmb_memory[rgn].base, 758 742 lmb_memory[rgn].size, 759 - base + size - 1, 1)) { 743 + base + size - 1, 1)) 760 744 /* ok, reserve the memory */ 761 - if (!lmb_reserve(base, size, flags)) 762 - return 0; 763 - } 745 + return lmb_reserve(base, size, flags); 746 + } 747 + 748 + return -EINVAL; 749 + } 750 + 751 + int lmb_alloc_mem(enum lmb_mem_type type, u64 align, phys_addr_t *addr, 752 + phys_size_t size, u32 flags) 753 + { 754 + int ret = -1; 755 + 756 + if (!size) 757 + return 0; 758 + 759 + if (!addr) 760 + return -EINVAL; 761 + 762 + switch (type) { 763 + case LMB_MEM_ALLOC_ANY: 764 + *addr = LMB_ALLOC_ANYWHERE; 765 + case LMB_MEM_ALLOC_MAX: 766 + ret = _lmb_alloc_base(size, align, addr, flags); 767 + break; 768 + case LMB_MEM_ALLOC_ADDR: 769 + ret = _lmb_alloc_addr(*addr, size, flags); 770 + break; 771 + default: 772 + log_debug("%s: Invalid memory allocation type requested %d\n", 773 + __func__, type); 764 774 } 765 775 766 - return -1; 776 + return ret; 767 777 } 768 778 769 779 /* Return number of bytes from a given address that are free */ ··· 775 785 struct lmb_region *lmb_memory = lmb.available_mem.data; 776 786 777 787 /* check if the requested address is in the memory regions */ 778 - rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1); 788 + rgn = lmb_overlap_checks(&lmb.available_mem, addr, 1, LMB_NOOVERWRITE, 789 + true); 779 790 if (rgn >= 0) { 780 791 for (i = 0; i < lmb.used_mem.count; i++) { 781 792 if (addr < lmb_used[i].base) {
+71 -31
test/lib/lmb.c
··· 71 71 return 0; 72 72 } 73 73 74 + static int lmb_reserve(phys_addr_t addr, phys_size_t size, u32 flags) 75 + { 76 + int err; 77 + 78 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, size, flags); 79 + if (err) 80 + return err; 81 + 82 + return 0; 83 + } 84 + 85 + static phys_addr_t lmb_alloc(phys_size_t size, ulong align) 86 + { 87 + int err; 88 + phys_addr_t addr; 89 + 90 + err = lmb_alloc_mem(LMB_MEM_ALLOC_ANY, align, &addr, size, LMB_NONE); 91 + if (err) 92 + return 0; 93 + 94 + return addr; 95 + } 96 + 97 + static phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, 98 + phys_addr_t max_addr, u32 flags) 99 + { 100 + int err; 101 + phys_addr_t addr; 102 + 103 + addr = max_addr; 104 + err = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, align, &addr, size, flags); 105 + if (err) 106 + return 0; 107 + 108 + return addr; 109 + } 110 + 111 + #define lmb_alloc_addr(addr, size, flags) lmb_reserve(addr, size, flags) 112 + 74 113 static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram, 75 114 const phys_size_t ram_size, const phys_addr_t ram0, 76 115 const phys_size_t ram0_size, ··· 143 182 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 144 183 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0); 145 184 146 - ret = lmb_free(a, 4); 185 + ret = lmb_free(a, 4, LMB_NONE); 147 186 ut_asserteq(ret, 0); 148 187 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 149 188 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); ··· 152 191 ut_asserteq(a, a2); 153 192 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 154 193 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0); 155 - ret = lmb_free(a2, 4); 194 + ret = lmb_free(a2, 4, LMB_NONE); 156 195 ut_asserteq(ret, 0); 157 196 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 158 197 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); 159 198 160 - ret = lmb_free(b, 4); 199 + ret = lmb_free(b, 4, LMB_NONE); 161 200 ut_asserteq(ret, 0); 162 201 ASSERT_LMB(mem_lst, used_lst, 0, 0, 3, 163 202 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 
··· 167 206 ut_asserteq(b, b2); 168 207 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 169 208 alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); 170 - ret = lmb_free(b2, 4); 209 + ret = lmb_free(b2, 4, LMB_NONE); 171 210 ut_asserteq(ret, 0); 172 211 ASSERT_LMB(mem_lst, used_lst, 0, 0, 3, 173 212 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 174 213 ram_end - 8, 4); 175 214 176 - ret = lmb_free(c, 4); 215 + ret = lmb_free(c, 4, LMB_NONE); 177 216 ut_asserteq(ret, 0); 178 217 ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, 179 218 alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0); 180 - ret = lmb_free(d, 4); 219 + ret = lmb_free(d, 4, LMB_NONE); 181 220 ut_asserteq(ret, 0); 182 221 ASSERT_LMB(mem_lst, used_lst, 0, 0, 1, alloc_64k_addr, 0x10000, 183 222 0, 0, 0, 0); ··· 281 320 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, 282 321 big_block_size + 0x10000, 0, 0, 0, 0); 283 322 284 - ret = lmb_free(a, big_block_size); 323 + ret = lmb_free(a, big_block_size, LMB_NONE); 285 324 ut_asserteq(ret, 0); 286 325 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000, 287 326 0, 0, 0, 0); ··· 353 392 - alloc_size_aligned, alloc_size, 0, 0); 354 393 } 355 394 /* and free them */ 356 - ret = lmb_free(b, alloc_size); 395 + ret = lmb_free(b, alloc_size, LMB_NONE); 357 396 ut_asserteq(ret, 0); 358 397 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 359 398 ram + ram_size - alloc_size_aligned, 360 399 alloc_size, 0, 0, 0, 0); 361 - ret = lmb_free(a, alloc_size); 400 + ret = lmb_free(a, alloc_size, LMB_NONE); 362 401 ut_asserteq(ret, 0); 363 402 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); 364 403 ··· 369 408 ram + ram_size - alloc_size_aligned, 370 409 alloc_size, 0, 0, 0, 0); 371 410 /* and free it */ 372 - ret = lmb_free(b, alloc_size); 411 + ret = lmb_free(b, alloc_size, LMB_NONE); 373 412 ut_asserteq(ret, 0); 374 413 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); 375 414 ··· 437 476 ASSERT_LMB(mem_lst, used_lst, ram, 
ram_size, 1, a, ram_size - 4, 438 477 0, 0, 0, 0); 439 478 /* check that this was an error by freeing b */ 440 - ret = lmb_free(b, 4); 479 + ret = lmb_free(b, 4, LMB_NONE); 441 480 ut_asserteq(ret, -1); 442 481 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4, 443 482 0, 0, 0, 0); 444 483 445 - ret = lmb_free(a, ram_size - 4); 484 + ret = lmb_free(a, ram_size - 4, LMB_NONE); 446 485 ut_asserteq(ret, 0); 447 486 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); 448 487 ··· 568 607 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE); 569 608 ut_asserteq(b, 0); 570 609 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE); 571 - ut_asserteq(b, -1); 610 + ut_asserteq(b, -EEXIST); 572 611 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE); 573 612 ut_asserteq(b, 0); 574 613 b = lmb_alloc_addr(alloc_addr_a, 0x2000, LMB_NONE); 575 614 ut_asserteq(b, 0); 576 - ret = lmb_free(alloc_addr_a, 0x2000); 615 + ret = lmb_free(alloc_addr_a, 0x2000, LMB_NONE); 577 616 ut_asserteq(ret, 0); 578 617 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE); 579 618 ut_asserteq(b, 0); 580 619 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NONE); 581 - ut_asserteq(b, -1); 620 + ut_asserteq(b, -EEXIST); 582 621 b = lmb_alloc_addr(alloc_addr_a, 0x1000, LMB_NOOVERWRITE); 583 - ut_asserteq(b, -1); 584 - ret = lmb_free(alloc_addr_a, 0x1000); 622 + ut_asserteq(b, -EEXIST); 623 + ret = lmb_free(alloc_addr_a, 0x1000, LMB_NONE); 585 624 ut_asserteq(ret, 0); 586 625 587 626 /* ··· 599 638 alloc_addr_a + 0x4000, 0x1000, 0, 0); 600 639 601 640 c = lmb_alloc_addr(alloc_addr_a + 0x1000, 0x5000, LMB_NONE); 602 - ut_asserteq(c, -1); 641 + ut_asserteq(c, -EEXIST); 603 642 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000, 604 643 alloc_addr_a + 0x4000, 0x1000, 0, 0); 605 644 606 - ret = lmb_free(alloc_addr_a, 0x1000); 645 + ret = lmb_free(alloc_addr_a, 0x1000, LMB_NONE); 607 646 ut_asserteq(ret, 0); 608 - ret = lmb_free(alloc_addr_a + 0x4000, 0x1000); 
647 + ret = lmb_free(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE); 609 648 ut_asserteq(ret, 0); 610 649 611 650 /* ··· 628 667 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_addr_a, 0x6000, 629 668 0, 0, 0, 0); 630 669 631 - ret = lmb_free(alloc_addr_a, 0x6000); 670 + ret = lmb_free(alloc_addr_a, 0x6000, LMB_NONE); 632 671 ut_asserteq(ret, 0); 633 672 634 673 /* ··· 646 685 alloc_addr_a + 0x4000, 0x1000, 0, 0); 647 686 648 687 c = lmb_alloc_addr(alloc_addr_a + 0x1000, 0x5000, LMB_NOOVERWRITE); 649 - ut_asserteq(c, -1); 688 + ut_asserteq(c, -EEXIST); 650 689 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, alloc_addr_a, 0x1000, 651 690 alloc_addr_a + 0x4000, 0x1000, 0, 0); 652 691 653 - ret = lmb_free(alloc_addr_a, 0x1000); 692 + ret = lmb_free(alloc_addr_a, 0x1000, LMB_NOOVERWRITE); 654 693 ut_asserteq(ret, 0); 655 - ret = lmb_free(alloc_addr_a + 0x4000, 0x1000); 694 + ret = lmb_free(alloc_addr_a + 0x4000, 0x1000, LMB_NOOVERWRITE); 656 695 ut_asserteq(ret, 0); 657 696 658 697 /* reserve 3 blocks */ ··· 693 732 0, 0, 0, 0); 694 733 695 734 /* free thge allocation from d */ 696 - ret = lmb_free(alloc_addr_c + 0x10000, ram_end - alloc_addr_c - 0x10000); 735 + ret = lmb_free(alloc_addr_c + 0x10000, ram_end - alloc_addr_c - 0x10000, 736 + LMB_NONE); 697 737 ut_asserteq(ret, 0); 698 738 699 739 /* allocate at 3 points in free range */ ··· 702 742 ut_asserteq(d, 0); 703 743 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000, 704 744 ram_end - 4, 4, 0, 0); 705 - ret = lmb_free(ram_end - 4, 4); 745 + ret = lmb_free(ram_end - 4, 4, LMB_NONE); 706 746 ut_asserteq(ret, 0); 707 747 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000, 708 748 0, 0, 0, 0); ··· 711 751 ut_asserteq(d, 0); 712 752 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000, 713 753 ram_end - 128, 4, 0, 0); 714 - ret = lmb_free(ram_end - 128, 4); 754 + ret = lmb_free(ram_end - 128, 4, LMB_NONE); 715 755 ut_asserteq(ret, 0); 716 756 ASSERT_LMB(mem_lst, used_lst, 
ram, ram_size, 1, ram, 0x18010000, 717 757 0, 0, 0, 0); ··· 720 760 ut_asserteq(d, 0); 721 761 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010004, 722 762 0, 0, 0, 0); 723 - ret = lmb_free(alloc_addr_c + 0x10000, 4); 763 + ret = lmb_free(alloc_addr_c + 0x10000, 4, LMB_NONE); 724 764 ut_asserteq(ret, 0); 725 765 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000, 726 766 0, 0, 0, 0); 727 767 728 768 /* allocate at the bottom a was assigned to ram at the top */ 729 - ret = lmb_free(ram, alloc_addr_a - ram); 769 + ret = lmb_free(ram, alloc_addr_a - ram, LMB_NONE); 730 770 ut_asserteq(ret, 0); 731 771 ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram + 0x8000000, 732 772 0x10010000, 0, 0, 0, 0); ··· 739 779 /* check that allocating outside memory fails */ 740 780 if (ram_end != 0) { 741 781 ret = lmb_alloc_addr(ram_end, 1, LMB_NONE); 742 - ut_asserteq(ret, -1); 782 + ut_asserteq(ret, -EINVAL); 743 783 } 744 784 if (ram != 0) { 745 785 ret = lmb_alloc_addr(ram - 1, 1, LMB_NONE); 746 - ut_asserteq(ret, -1); 786 + ut_asserteq(ret, -EINVAL); 747 787 } 748 788 749 789 lmb_pop(&store);