"Das U-Boot" Source Tree
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'dm-pull-2jun24-take2' of https://source.denx.de/u-boot/custodians/u-boot-dm

buildman CI improvements
binman fixes and assumed size
partial tools fixes for Python 3.12
patman enhancements

Tom Rini 4d338362 65fbdab2

+933 -428
+3 -3
arch/sandbox/cpu/os.c
··· 109 109 */ 110 110 flags |= O_CLOEXEC; 111 111 112 - return open(pathname, flags, 0777); 112 + return open(pathname, flags, 0644); 113 113 } 114 114 115 115 int os_close(int fd) ··· 746 746 struct sandbox_state *state = state_get_current(); 747 747 int fd, ret; 748 748 749 - fd = open(fname, O_CREAT | O_WRONLY, 0777); 749 + fd = open(fname, O_CREAT | O_WRONLY, 0644); 750 750 if (fd < 0) 751 751 return -ENOENT; 752 752 ret = write(fd, state->ram_buf, state->ram_size); ··· 791 791 if (write(fd, data, size) < 0) 792 792 return -EIO; 793 793 close(fd); 794 - if (chmod(fname, 0777)) 794 + if (chmod(fname, 0755)) 795 795 return -ENOEXEC; 796 796 797 797 return 0;
+4 -16
arch/sandbox/cpu/u-boot.lds
··· 19 19 *(_u_boot_sandbox_getopt_end) 20 20 } 21 21 22 - efi_runtime_start : { 23 - *(___efi_runtime_start) 24 - } 25 - 26 22 efi_runtime : { 23 + __efi_runtime_start = .; 27 24 *(efi_runtime_text) 28 25 *(efi_runtime_data) 29 - } 30 - 31 - efi_runtime_stop : { 32 - *(___efi_runtime_stop) 33 - } 34 - 35 - efi_runtime_rel_start : { 36 - *(___efi_runtime_rel_start) 26 + __efi_runtime_stop = .; 37 27 } 38 28 39 29 efi_runtime_rel : { 30 + __efi_runtime_rel_start = .; 40 31 *(.relefi_runtime_text) 41 32 *(.relefi_runtime_data) 42 - } 43 - 44 - efi_runtime_rel_stop : { 45 - *(___efi_runtime_rel_stop) 33 + __efi_runtime_rel_stop = .; 46 34 } 47 35 48 36 .dynsym :
+1 -1
arch/sandbox/lib/Makefile
··· 5 5 # (C) Copyright 2002-2006 6 6 # Wolfgang Denk, DENX Software Engineering, wd@denx.de. 7 7 8 - obj-y += fdt_fixup.o interrupts.o sections.o 8 + obj-y += fdt_fixup.o interrupts.o 9 9 obj-$(CONFIG_PCI) += pci_io.o 10 10 obj-$(CONFIG_CMD_BOOTM) += bootm.o 11 11 obj-$(CONFIG_CMD_BOOTZ) += bootm.o
-13
arch/sandbox/lib/sections.c
··· 1 - // SPDX-License-Identifier: GPL-2.0+ 2 - /* 3 - * Copyright 2013 Albert ARIBAUD <albert.u.boot@aribaud.net> 4 - * 5 - */ 6 - #include <linux/compiler.h> 7 - 8 - char __efi_runtime_start[0] __section("___efi_runtime_start"); 9 - char __efi_runtime_stop[0] __section("___efi_runtime_stop"); 10 - char __efi_runtime_rel_start[0] 11 - __section("___efi_runtime_rel_start"); 12 - char __efi_runtime_rel_stop[0] 13 - __section("___efi_runtime_rel_stop");
+5
arch/x86/dts/u-boot.dtsi
··· 24 24 #ifdef CONFIG_HAVE_INTEL_ME 25 25 intel-descriptor { 26 26 filename = CONFIG_FLASH_DESCRIPTOR_FILE; 27 + assume-size = <0x1000>; 27 28 }; 28 29 intel-me { 29 30 filename = CONFIG_INTEL_ME_FILE; 31 + assume-size = <0x1ff000>; 30 32 }; 31 33 #endif 32 34 #ifdef CONFIG_TPL ··· 87 89 #ifdef CONFIG_HAVE_MRC 88 90 intel-mrc { 89 91 offset = <CFG_X86_MRC_ADDR>; 92 + assume-size = <0x2fc94>; 90 93 }; 91 94 #endif 92 95 #ifdef CONFIG_FSP_VERSION1 ··· 98 101 #ifdef CONFIG_FSP_VERSION2 99 102 intel-descriptor { 100 103 filename = CONFIG_FLASH_DESCRIPTOR_FILE; 104 + assume-size = <4096>; 101 105 }; 102 106 intel-ifwi { 103 107 filename = CONFIG_IFWI_INPUT_FILE; ··· 139 143 intel-vga { 140 144 filename = CONFIG_VGA_BIOS_FILE; 141 145 offset = <CONFIG_VGA_BIOS_ADDR>; 146 + assume-size = <0x10000>; 142 147 }; 143 148 #endif 144 149 #ifdef CONFIG_HAVE_VBT
-2
arch/x86/lib/acpi_table.c
··· 478 478 /* Fill out header fields. */ 479 479 acpi_fill_header(header, "HPET"); 480 480 481 - header->creator_revision = ASL_REVISION; 482 481 header->length = sizeof(struct acpi_hpet); 483 482 header->revision = acpi_get_table_revision(ACPITAB_HPET); 484 483 ··· 569 568 memcpy(header->oem_id, OEM_ID, 6); 570 569 memcpy(header->oem_table_id, OEM_TABLE_ID, 8); 571 570 memcpy(header->creator_id, ASLC_ID, 4); 572 - header->creator_revision = 1; 573 571 574 572 fadt->x_firmware_ctrl = map_to_sysmem(facs); 575 573 fadt->x_dsdt = map_to_sysmem(dsdt);
+2 -2
boot/bootmeth_cros.c
··· 147 147 { 148 148 struct blk_desc *desc = dev_get_uclass_plat(blk); 149 149 struct vb2_keyblock *hdr; 150 - struct uuid type; 150 + efi_guid_t type; 151 151 ulong num_blks; 152 152 int ret; 153 153 ··· 160 160 161 161 /* Check for kernel partition type */ 162 162 log_debug("part %x: type=%s\n", partnum, info->type_guid); 163 - if (uuid_str_to_bin(info->type_guid, (u8 *)&type, UUID_STR_FORMAT_GUID)) 163 + if (uuid_str_to_bin(info->type_guid, type.b, UUID_STR_FORMAT_GUID)) 164 164 return log_msg_ret("typ", -EINVAL); 165 165 166 166 if (memcmp(&cros_kern_type, &type, sizeof(type)))
+3 -1
cmd/unlz4.c
··· 6 6 7 7 #include <command.h> 8 8 #include <env.h> 9 + #include <mapmem.h> 9 10 #include <vsprintf.h> 10 11 #include <u-boot/lz4.h> 11 12 ··· 26 27 return CMD_RET_USAGE; 27 28 } 28 29 29 - ret = ulz4fn((void *)src, src_len, (void *)dst, &dst_len); 30 + ret = ulz4fn(map_sysmem(src, 0), src_len, map_sysmem(dst, dst_len), 31 + &dst_len); 30 32 if (ret) { 31 33 printf("Uncompressed err :%d\n", ret); 32 34 return 1;
+1 -2
common/board_r.c
··· 230 230 231 231 oftree_reset(); 232 232 233 - /* Save the pre-reloc driver model and start a new one */ 234 - gd->dm_root_f = gd->dm_root; 233 + /* Drop the pre-reloc driver model and start a new one */ 235 234 gd->dm_root = NULL; 236 235 #ifdef CONFIG_TIMER 237 236 gd->timer = NULL;
+1
configs/sandbox64_defconfig
··· 267 267 CONFIG_TPM=y 268 268 CONFIG_ERRNO_STR=y 269 269 CONFIG_GETOPT=y 270 + CONFIG_EFI_RT_VOLATILE_STORE=y 270 271 CONFIG_EFI_SECURE_BOOT=y 271 272 CONFIG_TEST_FDTDEC=y 272 273 CONFIG_UNIT_TEST=y
+1
configs/sandbox_defconfig
··· 348 348 CONFIG_TPM=y 349 349 CONFIG_ERRNO_STR=y 350 350 CONFIG_GETOPT=y 351 + CONFIG_EFI_RT_VOLATILE_STORE=y 351 352 CONFIG_EFI_RUNTIME_UPDATE_CAPSULE=y 352 353 CONFIG_EFI_CAPSULE_ON_DISK=y 353 354 CONFIG_EFI_CAPSULE_FIRMWARE_RAW=y
+1 -1
drivers/core/Kconfig
··· 56 56 out - it will do nothing when called. 57 57 58 58 config SPL_DM_WARN 59 - bool "Enable warnings in driver model wuth SPL" 59 + bool "Enable warnings in driver model in SPL" 60 60 depends on SPL_DM 61 61 help 62 62 Enable this to see warnings related to driver model in SPL
+1 -1
drivers/core/device.c
··· 58 58 59 59 ret = uclass_get(drv->id, &uc); 60 60 if (ret) { 61 - debug("Missing uclass for driver %s\n", drv->name); 61 + dm_warn("Missing uclass for driver %s\n", drv->name); 62 62 return ret; 63 63 } 64 64
+4 -3
drivers/core/fdtaddr.c
··· 15 15 #include <asm/global_data.h> 16 16 #include <asm/io.h> 17 17 #include <dm/device-internal.h> 18 + #include <dm/util.h> 18 19 19 20 DECLARE_GLOBAL_DATA_PTR; 20 21 ··· 32 33 33 34 na = fdt_address_cells(gd->fdt_blob, parent); 34 35 if (na < 1) { 35 - debug("bad #address-cells\n"); 36 + dm_warn("bad #address-cells\n"); 36 37 return FDT_ADDR_T_NONE; 37 38 } 38 39 39 40 ns = fdt_size_cells(gd->fdt_blob, parent); 40 41 if (ns < 0) { 41 - debug("bad #size-cells\n"); 42 + dm_warn("bad #size-cells\n"); 42 43 return FDT_ADDR_T_NONE; 43 44 } 44 45 45 46 reg = fdt_getprop(gd->fdt_blob, offset, "reg", &len); 46 47 if (!reg || (len <= (index * sizeof(fdt32_t) * (na + ns)))) { 47 - debug("Req index out of range\n"); 48 + dm_warn("Req index out of range\n"); 48 49 return FDT_ADDR_T_NONE; 49 50 } 50 51
+3 -4
drivers/core/lists.c
··· 144 144 145 145 drv = lists_driver_lookup_name(drv_name); 146 146 if (!drv) { 147 - debug("Cannot find driver '%s'\n", drv_name); 147 + dm_warn("Cannot find driver '%s'\n", drv_name); 148 148 return -ENOENT; 149 149 } 150 150 ret = device_bind_with_driver_data(parent, drv, dev_name, 0 /* data */, ··· 246 246 } 247 247 248 248 if (entry->of_match) 249 - log_debug(" - found match at '%s': '%s' matches '%s'\n", 250 - entry->name, entry->of_match->compatible, 251 - id->compatible); 249 + log_debug(" - found match at driver '%s' for '%s'\n", 250 + entry->name, id->compatible); 252 251 ret = device_bind_with_driver_data(parent, entry, name, 253 252 id ? id->data : 0, node, 254 253 &dev);
+26 -25
drivers/core/of_access.c
··· 25 25 #include <linux/bug.h> 26 26 #include <linux/libfdt.h> 27 27 #include <dm/of_access.h> 28 + #include <dm/util.h> 28 29 #include <linux/ctype.h> 29 30 #include <linux/err.h> 30 31 #include <linux/ioport.h> ··· 489 490 { 490 491 const u8 *val; 491 492 492 - debug("%s: %s: ", __func__, propname); 493 + dm_warn("%s: %s: ", __func__, propname); 493 494 if (!np) 494 495 return -EINVAL; 495 496 val = of_find_property_value_of_size(np, propname, sizeof(*outp)); 496 497 if (IS_ERR(val)) { 497 - debug("(not found)\n"); 498 + dm_warn("(not found)\n"); 498 499 return PTR_ERR(val); 499 500 } 500 501 501 502 *outp = *val; 502 - debug("%#x (%d)\n", *outp, *outp); 503 + dm_warn("%#x (%d)\n", *outp, *outp); 503 504 504 505 return 0; 505 506 } ··· 508 509 { 509 510 const __be16 *val; 510 511 511 - debug("%s: %s: ", __func__, propname); 512 + dm_warn("%s: %s: ", __func__, propname); 512 513 if (!np) 513 514 return -EINVAL; 514 515 val = of_find_property_value_of_size(np, propname, sizeof(*outp)); 515 516 if (IS_ERR(val)) { 516 - debug("(not found)\n"); 517 + dm_warn("(not found)\n"); 517 518 return PTR_ERR(val); 518 519 } 519 520 520 521 *outp = be16_to_cpup(val); 521 - debug("%#x (%d)\n", *outp, *outp); 522 + dm_warn("%#x (%d)\n", *outp, *outp); 522 523 523 524 return 0; 524 525 } ··· 533 534 { 534 535 const __be32 *val; 535 536 536 - debug("%s: %s: ", __func__, propname); 537 + dm_warn("%s: %s: ", __func__, propname); 537 538 val = of_find_property_value_of_size(np, propname, 538 539 sz * sizeof(*out_values)); 539 540 540 541 if (IS_ERR(val)) 541 542 return PTR_ERR(val); 542 543 543 - debug("size %zd\n", sz); 544 + dm_warn("size %zd\n", sz); 544 545 while (sz--) 545 546 *out_values++ = be32_to_cpup(val++); 546 547 ··· 552 553 { 553 554 const __be32 *val; 554 555 555 - debug("%s: %s: ", __func__, propname); 556 + dm_warn("%s: %s: ", __func__, propname); 556 557 if (!np) 557 558 return -EINVAL; 558 559 559 560 val = of_find_property_value_of_size(np, propname, 560 561 
sizeof(*outp) * (index + 1)); 561 562 if (IS_ERR(val)) { 562 - debug("(not found)\n"); 563 + dm_warn("(not found)\n"); 563 564 return PTR_ERR(val); 564 565 } 565 566 566 567 *outp = be32_to_cpup(val + index); 567 - debug("%#x (%d)\n", *outp, *outp); 568 + dm_warn("%#x (%d)\n", *outp, *outp); 568 569 569 570 return 0; 570 571 } ··· 574 575 { 575 576 const __be64 *val; 576 577 577 - debug("%s: %s: ", __func__, propname); 578 + dm_warn("%s: %s: ", __func__, propname); 578 579 if (!np) 579 580 return -EINVAL; 580 581 581 582 val = of_find_property_value_of_size(np, propname, 582 583 sizeof(*outp) * (index + 1)); 583 584 if (IS_ERR(val)) { 584 - debug("(not found)\n"); 585 + dm_warn("(not found)\n"); 585 586 return PTR_ERR(val); 586 587 } 587 588 588 589 *outp = be64_to_cpup(val + index); 589 - debug("%#llx (%lld)\n", (unsigned long long)*outp, 590 - (unsigned long long)*outp); 590 + dm_warn("%#llx (%lld)\n", (unsigned long long)*outp, 591 + (unsigned long long)*outp); 591 592 592 593 return 0; 593 594 } ··· 620 621 l = strnlen(p, end - p) + 1; 621 622 if (p + l > end) 622 623 return -EILSEQ; 623 - debug("comparing %s with %s\n", string, p); 624 + dm_warn("comparing %s with %s\n", string, p); 624 625 if (strcmp(string, p) == 0) 625 626 return i; /* Found it; return index */ 626 627 } ··· 707 708 if (cells_name || cur_index == index) { 708 709 node = of_find_node_by_phandle(NULL, phandle); 709 710 if (!node) { 710 - debug("%s: could not find phandle\n", 711 - np->full_name); 711 + dm_warn("%s: could not find phandle\n", 712 + np->full_name); 712 713 goto err; 713 714 } 714 715 } 715 716 716 717 if (cells_name) { 717 718 if (of_read_u32(node, cells_name, &count)) { 718 - debug("%s: could not get %s for %s\n", 719 - np->full_name, cells_name, 720 - node->full_name); 719 + dm_warn("%s: could not get %s for %s\n", 720 + np->full_name, cells_name, 721 + node->full_name); 721 722 goto err; 722 723 } 723 724 } else { ··· 729 730 * remaining property data length 730 731 */ 731 
732 if (list + count > list_end) { 732 - debug("%s: arguments longer than property\n", 733 - np->full_name); 733 + dm_warn("%s: arguments longer than property\n", 734 + np->full_name); 734 735 goto err; 735 736 } 736 737 } ··· 825 826 strncpy(ap->stem, stem, stem_len); 826 827 ap->stem[stem_len] = 0; 827 828 list_add_tail(&ap->link, &aliases_lookup); 828 - debug("adding DT alias:%s: stem=%s id=%i node=%s\n", 829 - ap->alias, ap->stem, ap->id, of_node_full_name(np)); 829 + dm_warn("adding DT alias:%s: stem=%s id=%i node=%s\n", 830 + ap->alias, ap->stem, ap->id, of_node_full_name(np)); 830 831 } 831 832 832 833 int of_alias_scan(void)
+21 -20
drivers/core/of_addr.c
··· 11 11 #include <linux/libfdt.h> 12 12 #include <dm/of_access.h> 13 13 #include <dm/of_addr.h> 14 + #include <dm/util.h> 14 15 #include <linux/err.h> 15 16 #include <linux/ioport.h> 16 17 #include <linux/printk.h> ··· 26 27 #ifdef DEBUG 27 28 static void of_dump_addr(const char *s, const __be32 *addr, int na) 28 29 { 29 - debug("%s", s); 30 + dm_warn("%s", s); 30 31 while (na--) 31 32 pr_cont(" %08x", be32_to_cpu(*(addr++))); 32 33 pr_cont("\n"); ··· 65 66 s = of_read_number(range + na + pna, ns); 66 67 da = of_read_number(addr, na); 67 68 68 - debug("default map, cp=%llx, s=%llx, da=%llx\n", 69 - (unsigned long long)cp, (unsigned long long)s, 70 - (unsigned long long)da); 69 + dm_warn("default map, cp=%llx, s=%llx, da=%llx\n", 70 + (unsigned long long)cp, (unsigned long long)s, 71 + (unsigned long long)da); 71 72 72 73 if (da < cp || da >= (cp + s)) 73 74 return OF_BAD_ADDR; ··· 193 194 ranges = of_get_property(parent, rprop, &rlen); 194 195 if (ranges == NULL && !of_empty_ranges_quirk(parent) && 195 196 strcmp(rprop, "dma-ranges")) { 196 - debug("no ranges; cannot translate\n"); 197 + dm_warn("no ranges; cannot translate\n"); 197 198 return 1; 198 199 } 199 200 if (ranges == NULL || rlen == 0) { 200 201 offset = of_read_number(addr, na); 201 202 memset(addr, 0, pna * 4); 202 - debug("empty ranges; 1:1 translation\n"); 203 + dm_warn("empty ranges; 1:1 translation\n"); 203 204 goto finish; 204 205 } 205 206 206 - debug("walking ranges...\n"); 207 + dm_warn("walking ranges...\n"); 207 208 208 209 /* Now walk through the ranges */ 209 210 rlen /= 4; ··· 214 215 break; 215 216 } 216 217 if (offset == OF_BAD_ADDR) { 217 - debug("not found !\n"); 218 + dm_warn("not found !\n"); 218 219 return 1; 219 220 } 220 221 memcpy(addr, ranges + na, 4 * pna); 221 222 222 223 finish: 223 224 of_dump_addr("parent translation for:", addr, pna); 224 - debug("with offset: %llx\n", (unsigned long long)offset); 225 + dm_warn("with offset: %llx\n", (unsigned long long)offset); 225 226 
226 227 /* Translate it into parent bus space */ 227 228 return pbus->translate(addr, offset, pna); ··· 246 247 int na, ns, pna, pns; 247 248 u64 result = OF_BAD_ADDR; 248 249 249 - debug("** translation for device %s **\n", of_node_full_name(dev)); 250 + dm_warn("** translation for device %s **\n", of_node_full_name(dev)); 250 251 251 252 /* Increase refcount at current level */ 252 253 (void)of_node_get(dev); ··· 260 261 /* Count address cells & copy address locally */ 261 262 bus->count_cells(dev, &na, &ns); 262 263 if (!OF_CHECK_COUNTS(na, ns)) { 263 - debug("Bad cell count for %s\n", of_node_full_name(dev)); 264 + dm_warn("Bad cell count for %s\n", of_node_full_name(dev)); 264 265 goto bail; 265 266 } 266 267 memcpy(addr, in_addr, na * 4); 267 268 268 - debug("bus is %s (na=%d, ns=%d) on %s\n", bus->name, na, ns, 269 - of_node_full_name(parent)); 269 + dm_warn("bus is %s (na=%d, ns=%d) on %s\n", bus->name, na, ns, 270 + of_node_full_name(parent)); 270 271 of_dump_addr("translating address:", addr, na); 271 272 272 273 /* Translate */ ··· 278 279 279 280 /* If root, we have finished */ 280 281 if (parent == NULL) { 281 - debug("reached root node\n"); 282 + dm_warn("reached root node\n"); 282 283 result = of_read_number(addr, na); 283 284 break; 284 285 } ··· 287 288 pbus = of_match_bus(parent); 288 289 pbus->count_cells(dev, &pna, &pns); 289 290 if (!OF_CHECK_COUNTS(pna, pns)) { 290 - debug("Bad cell count for %s\n", 291 - of_node_full_name(dev)); 291 + dm_warn("Bad cell count for %s\n", 292 + of_node_full_name(dev)); 292 293 break; 293 294 } 294 295 295 - debug("parent bus is %s (na=%d, ns=%d) on %s\n", pbus->name, 296 - pna, pns, of_node_full_name(parent)); 296 + dm_warn("parent bus is %s (na=%d, ns=%d) on %s\n", pbus->name, 297 + pna, pns, of_node_full_name(parent)); 297 298 298 299 /* Apply bus translation */ 299 300 if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) ··· 358 359 } 359 360 360 361 if (!dev || !ranges) { 361 - debug("no dma-ranges found for node %s\n", 362 - of_node_full_name(dev)); 362 + dm_warn("no dma-ranges found for node %s\n", 363 + of_node_full_name(dev)); 363 364 ret = -ENOENT; 364 365 goto out; 365 366 }
+17 -16
drivers/core/of_extra.c
··· 9 9 #include <dm/of_access.h> 10 10 #include <dm/of_extra.h> 11 11 #include <dm/ofnode.h> 12 + #include <dm/util.h> 12 13 13 14 int ofnode_read_fmap_entry(ofnode node, struct fmap_entry *entry) 14 15 { ··· 16 17 ofnode subnode; 17 18 18 19 if (ofnode_read_u32(node, "image-pos", &entry->offset)) { 19 - debug("Node '%s' has bad/missing 'image-pos' property\n", 20 - ofnode_get_name(node)); 20 + dm_warn("Node '%s' has bad/missing 'image-pos' property\n", 21 + ofnode_get_name(node)); 21 22 return log_msg_ret("image-pos", -ENOENT); 22 23 } 23 24 if (ofnode_read_u32(node, "size", &entry->length)) { 24 - debug("Node '%s' has bad/missing 'size' property\n", 25 - ofnode_get_name(node)); 25 + dm_warn("Node '%s' has bad/missing 'size' property\n", 26 + ofnode_get_name(node)); 26 27 return log_msg_ret("size", -ENOENT); 27 28 } 28 29 entry->used = ofnode_read_s32_default(node, "used", entry->length); ··· 57 58 const fdt_addr_t *cell; 58 59 int len; 59 60 60 - debug("%s: %s: %s\n", __func__, ofnode_get_name(node), prop_name); 61 + dm_warn("%s: %s: %s\n", __func__, ofnode_get_name(node), prop_name); 61 62 cell = ofnode_get_property(node, prop_name, &len); 62 63 if (!cell || (len < sizeof(fdt_addr_t) * 2)) { 63 - debug("cell=%p, len=%d\n", cell, len); 64 + dm_warn("cell=%p, len=%d\n", cell, len); 64 65 return -1; 65 66 } 66 67 67 68 *basep = fdt_addr_to_cpu(*cell); 68 69 *sizep = fdt_size_to_cpu(cell[1]); 69 - debug("%s: base=%08lx, size=%lx\n", __func__, (ulong)*basep, 70 - (ulong)*sizep); 70 + dm_warn("%s: base=%08lx, size=%lx\n", __func__, (ulong)*basep, 71 + (ulong)*sizep); 71 72 72 73 return 0; 73 74 } ··· 85 86 if (!ofnode_valid(config_node)) { 86 87 config_node = ofnode_path("/config"); 87 88 if (!ofnode_valid(config_node)) { 88 - debug("%s: Cannot find /config node\n", __func__); 89 + dm_warn("%s: Cannot find /config node\n", __func__); 89 90 return -ENOENT; 90 91 } 91 92 } ··· 96 97 suffix); 97 98 mem = ofnode_read_string(config_node, prop_name); 98 99 if (!mem) { 99 - debug("%s: No memory type for '%s', using /memory\n", __func__, 100 - prop_name); 100 + dm_warn("%s: No memory type for '%s', using /memory\n", __func__, 101 + prop_name); 101 102 mem = "/memory"; 102 103 } 103 104 104 105 node = ofnode_path(mem); 105 106 if (!ofnode_valid(node)) { 106 - debug("%s: Failed to find node '%s'\n", __func__, mem); 107 + dm_warn("%s: Failed to find node '%s'\n", __func__, mem); 107 108 return -ENOENT; 108 109 } ··· 112 113 * use the first 113 114 */ 114 115 if (ofnode_decode_region(node, "reg", &base, &size)) { 115 - debug("%s: Failed to decode memory region %s\n", __func__, 116 - mem); 116 + dm_warn("%s: Failed to decode memory region %s\n", __func__, 117 + mem); 117 118 return -EINVAL; 118 119 } ··· 121 122 suffix); 122 123 if (ofnode_decode_region(config_node, prop_name, &offset, 123 124 &offset_size)) { 124 - debug("%s: Failed to decode memory region '%s'\n", __func__, 125 - prop_name); 125 + dm_warn("%s: Failed to decode memory region '%s'\n", __func__, 126 + prop_name); 126 127 return -EINVAL; 127 128 } 128 129
+41 -40
drivers/core/ofnode.c
··· 16 16 #include <dm/of_access.h> 17 17 #include <dm/of_addr.h> 18 18 #include <dm/ofnode.h> 19 + #include <dm/util.h> 19 20 #include <linux/err.h> 20 21 #include <linux/ioport.h> 21 22 #include <asm/global_data.h> ··· 314 315 int len; 315 316 316 317 assert(ofnode_valid(node)); 317 - debug("%s: %s: ", __func__, propname); 318 + dm_warn("%s: %s: ", __func__, propname); 318 319 319 320 if (ofnode_is_np(node)) 320 321 return of_read_u8(ofnode_to_np(node), propname, outp); ··· 322 323 cell = fdt_getprop(gd->fdt_blob, ofnode_to_offset(node), propname, 323 324 &len); 324 325 if (!cell || len < sizeof(*cell)) { 325 - debug("(not found)\n"); 326 + dm_warn("(not found)\n"); 326 327 return -EINVAL; 327 328 } 328 329 *outp = *cell; 329 - debug("%#x (%d)\n", *outp, *outp); 330 + dm_warn("%#x (%u)\n", *outp, *outp); 330 331 331 332 return 0; 332 333 } ··· 345 346 int len; 346 347 347 348 assert(ofnode_valid(node)); 348 - debug("%s: %s: ", __func__, propname); 349 + dm_warn("%s: %s: ", __func__, propname); 349 350 350 351 if (ofnode_is_np(node)) 351 352 return of_read_u16(ofnode_to_np(node), propname, outp); ··· 353 354 cell = fdt_getprop(gd->fdt_blob, ofnode_to_offset(node), propname, 354 355 &len); 355 356 if (!cell || len < sizeof(*cell)) { 356 - debug("(not found)\n"); 357 + dm_warn("(not found)\n"); 357 358 return -EINVAL; 358 359 } 359 360 *outp = be16_to_cpup(cell); 360 - debug("%#x (%d)\n", *outp, *outp); 361 + dm_warn("%#x (%u)\n", *outp, *outp); 361 362 362 363 return 0; 363 364 } ··· 390 391 int len; 391 392 392 393 assert(ofnode_valid(node)); 393 - debug("%s: %s: ", __func__, propname); 394 + dm_warn("%s: %s: ", __func__, propname); 394 395 395 396 if (ofnode_is_np(node)) 396 397 return of_read_u32_index(ofnode_to_np(node), propname, index, ··· 399 400 cell = fdt_getprop(ofnode_to_fdt(node), ofnode_to_offset(node), 400 401 propname, &len); 401 402 if (!cell) { 402 - debug("(not found)\n"); 403 + dm_warn("(not found)\n"); 403 404 return -EINVAL; 404 405 } 405 406 
406 407 if (len < (sizeof(int) * (index + 1))) { 407 - debug("(not large enough)\n"); 408 + dm_warn("(not large enough)\n"); 408 409 return -EOVERFLOW; 409 410 } 410 411 411 412 *outp = fdt32_to_cpu(cell[index]); 412 - debug("%#x (%d)\n", *outp, *outp); 413 + dm_warn("%#x (%u)\n", *outp, *outp); 413 414 414 415 return 0; 415 416 } ··· 429 430 cell = fdt_getprop(ofnode_to_fdt(node), ofnode_to_offset(node), 430 431 propname, &len); 431 432 if (!cell) { 432 - debug("(not found)\n"); 433 + dm_warn("(not found)\n"); 433 434 return -EINVAL; 434 435 } 435 436 436 437 if (len < (sizeof(u64) * (index + 1))) { 437 - debug("(not large enough)\n"); 438 + dm_warn("(not large enough)\n"); 438 439 return -EOVERFLOW; 439 440 } 440 441 441 442 *outp = fdt64_to_cpu(cell[index]); 442 - debug("%#llx (%lld)\n", *outp, *outp); 443 + dm_warn("%#llx (%llu)\n", *outp, *outp); 443 444 444 445 return 0; 445 446 } ··· 467 468 int len; 468 469 469 470 assert(ofnode_valid(node)); 470 - debug("%s: %s: ", __func__, propname); 471 + dm_warn("%s: %s: ", __func__, propname); 471 472 472 473 if (ofnode_is_np(node)) 473 474 return of_read_u64(ofnode_to_np(node), propname, outp); ··· 475 476 cell = fdt_getprop(ofnode_to_fdt(node), ofnode_to_offset(node), 476 477 propname, &len); 477 478 if (!cell || len < sizeof(*cell)) { 478 - debug("(not found)\n"); 479 + dm_warn("(not found)\n"); 479 480 return -EINVAL; 480 481 } 481 482 *outp = fdt64_to_cpu(cell[0]); 482 - debug("%#llx (%lld)\n", (unsigned long long)*outp, 483 - (unsigned long long)*outp); 483 + dm_warn("%#llx (%llu)\n", (unsigned long long)*outp, 484 + (unsigned long long)*outp); 484 485 485 486 return 0; 486 487 } ··· 498 499 bool prop; 499 500 500 501 assert(ofnode_valid(node)); 501 - debug("%s: %s: ", __func__, propname); 502 + dm_warn("%s: %s: ", __func__, propname); 502 503 503 504 prop = ofnode_has_property(node, propname); 504 505 505 - debug("%s\n", prop ? "true" : "false"); 506 + dm_warn("%s\n", prop ? 
"true" : "false"); 506 507 507 508 return prop ? true : false; 508 509 } ··· 513 514 int len; 514 515 515 516 assert(ofnode_valid(node)); 516 - debug("%s: %s: ", __func__, propname); 517 + dm_warn("%s: %s: ", __func__, propname); 517 518 518 519 if (ofnode_is_np(node)) { 519 520 struct property *prop = of_find_property( ··· 528 529 propname, &len); 529 530 } 530 531 if (!val) { 531 - debug("<not found>\n"); 532 + dm_warn("<not found>\n"); 532 533 if (sizep) 533 534 *sizep = -FDT_ERR_NOTFOUND; 534 535 return NULL; ··· 549 550 return NULL; 550 551 551 552 if (strnlen(str, len) >= len) { 552 - debug("<invalid>\n"); 553 + dm_warn("<invalid>\n"); 553 554 return NULL; 554 555 } 555 - debug("%s\n", str); 556 + dm_warn("%s\n", str); 556 557 557 558 return str; 558 559 } ··· 572 573 ofnode subnode; 573 574 574 575 assert(ofnode_valid(node)); 575 - debug("%s: %s: ", __func__, subnode_name); 576 + dm_warn("%s: %s: ", __func__, subnode_name); 576 577 577 578 if (ofnode_is_np(node)) { 578 579 struct device_node *np = ofnode_to_np(node); ··· 587 588 ofnode_to_offset(node), subnode_name); 588 589 subnode = noffset_to_ofnode(node, ooffset); 589 590 } 590 - debug("%s\n", ofnode_valid(subnode) ? 591 - ofnode_get_name(subnode) : "<none>"); 591 + dm_warn("%s\n", ofnode_valid(subnode) ? 
592 + ofnode_get_name(subnode) : "<none>"); 592 593 593 594 return subnode; 594 595 } ··· 597 598 u32 *out_values, size_t sz) 598 599 { 599 600 assert(ofnode_valid(node)); 600 - debug("%s: %s: ", __func__, propname); 601 + dm_warn("%s: %s: ", __func__, propname); 601 602 602 603 if (ofnode_is_np(node)) { 603 604 return of_read_u32_array(ofnode_to_np(node), propname, ··· 669 670 const char *ofnode_get_name(ofnode node) 670 671 { 671 672 if (!ofnode_valid(node)) { 672 - debug("%s node not valid\n", __func__); 673 + dm_warn("%s node not valid\n", __func__); 673 674 return NULL; 674 675 } 675 676 ··· 1030 1031 if (!prop) 1031 1032 return ofnode_null(); 1032 1033 1033 - debug("%s: node_path: %s\n", __func__, prop); 1034 + dm_warn("%s: node_path: %s\n", __func__, prop); 1034 1035 1035 1036 return ofnode_path(prop); 1036 1037 } ··· 1053 1054 1054 1055 length = ofnode_read_size(node, name); 1055 1056 if (length < 0) { 1056 - debug("%s: could not find property %s\n", 1057 - ofnode_get_name(node), name); 1057 + dm_warn("%s: could not find property %s\n", 1058 + ofnode_get_name(node), name); 1058 1059 return length; 1059 1060 } 1060 1061 ··· 1299 1300 int len; 1300 1301 int ret = -ENOENT; 1301 1302 1302 - debug("%s: %s: ", __func__, propname); 1303 + dm_warn("%s: %s: ", __func__, propname); 1303 1304 1304 1305 /* 1305 1306 * If we follow the pci bus bindings strictly, we should check ··· 1316 1317 int i; 1317 1318 1318 1319 for (i = 0; i < num; i++) { 1319 - debug("pci address #%d: %08lx %08lx %08lx\n", i, 1320 - (ulong)fdt32_to_cpu(cell[0]), 1320 + dm_warn("pci address #%d: %08lx %08lx %08lx\n", i, 1321 + (ulong)fdt32_to_cpu(cell[0]), 1321 1322 (ulong)fdt32_to_cpu(cell[1]), 1322 1323 (ulong)fdt32_to_cpu(cell[2])); 1323 1324 if ((fdt32_to_cpu(*cell) & type) == type) { ··· 1346 1347 ret = -EINVAL; 1347 1348 1348 1349 fail: 1349 - debug("(not found)\n"); 1350 + dm_warn("(not found)\n"); 1350 1351 return ret; 1351 1352 } 1352 1353 ··· 1630 1631 { 1631 1632 
assert(ofnode_valid(node)); 1632 1633 1633 - debug("%s: %s = %s", __func__, propname, value); 1634 + dm_warn("%s: %s = %s", __func__, propname, value); 1634 1635 1635 1636 return ofnode_write_prop(node, propname, value, strlen(value) + 1, 1636 1637 false); ··· 1743 1744 1744 1745 uboot = ofnode_path("/options/u-boot"); 1745 1746 if (!ofnode_valid(uboot)) { 1746 - debug("%s: Missing /u-boot node\n", __func__); 1747 + dm_warn("%s: Missing /u-boot node\n", __func__); 1747 1748 return -EINVAL; 1748 1749 } 1749 1750 ··· 1769 1770 1770 1771 uboot = ofnode_path("/options/u-boot"); 1771 1772 if (!ofnode_valid(uboot)) { 1772 - debug("%s: Missing /u-boot node\n", __func__); 1773 + dm_warn("%s: Missing /u-boot node\n", __func__); 1773 1774 return -EINVAL; 1774 1775 } 1775 1776 ··· 1784 1785 return -EINVAL; 1785 1786 1786 1787 if (!bootscr_flash_size) { 1787 - debug("bootscr-flash-size is zero. Ignoring properties!\n"); 1788 + dm_warn("bootscr-flash-size is zero. Ignoring properties!\n"); 1788 1789 *bootscr_flash_offset = 0; 1789 1790 return -EINVAL; 1790 1791 } ··· 1831 1832 if (!strcmp(mode, phy_interface_strings[i])) 1832 1833 return i; 1833 1834 1834 - debug("%s: Invalid PHY interface '%s'\n", __func__, mode); 1835 + dm_warn("%s: Invalid PHY interface '%s'\n", __func__, mode); 1835 1836 1836 1837 return PHY_INTERFACE_MODE_NA; 1837 1838 }
+29 -28
drivers/core/regmap.c
··· 17 17 #include <asm/io.h> 18 18 #include <dm/of_addr.h> 19 19 #include <dm/devres.h> 20 + #include <dm/util.h> 20 21 #include <linux/ioport.h> 21 22 #include <linux/compat.h> 22 23 #include <linux/err.h> ··· 139 140 ret = of_address_to_resource(ofnode_to_np(node), 140 141 index, &r); 141 142 if (ret) { 142 - debug("%s: Could not read resource of range %d (ret = %d)\n", 143 - ofnode_get_name(node), index, ret); 143 + dm_warn("%s: Could not read resource of range %d (ret = %d)\n", 144 + ofnode_get_name(node), index, ret); 144 145 return ret; 145 146 } 146 147 ··· 154 155 addr_len, size_len, 155 156 &sz, true); 156 157 if (range->start == FDT_ADDR_T_NONE) { 157 - debug("%s: Could not read start of range %d\n", 158 - ofnode_get_name(node), index); 158 + dm_warn("%s: Could not read start of range %d\n", 159 + ofnode_get_name(node), index); 159 160 return -EINVAL; 160 161 } 161 162 ··· 173 174 174 175 addr_len = ofnode_read_simple_addr_cells(ofnode_get_parent(node)); 175 176 if (addr_len < 0) { 176 - debug("%s: Error while reading the addr length (ret = %d)\n", 177 - ofnode_get_name(node), addr_len); 177 + dm_warn("%s: Error while reading the addr length (ret = %d)\n", 178 + ofnode_get_name(node), addr_len); 178 179 return addr_len; 179 180 } 180 181 181 182 size_len = ofnode_read_simple_size_cells(ofnode_get_parent(node)); 182 183 if (size_len < 0) { 183 - debug("%s: Error while reading the size length: (ret = %d)\n", 184 - ofnode_get_name(node), size_len); 184 + dm_warn("%s: Error while reading the size length: (ret = %d)\n", 185 + ofnode_get_name(node), size_len); 185 186 return size_len; 186 187 } 187 188 ··· 250 251 251 252 addr_len = ofnode_read_simple_addr_cells(ofnode_get_parent(node)); 252 253 if (addr_len < 0) { 253 - debug("%s: Error while reading the addr length (ret = %d)\n", 254 - ofnode_get_name(node), addr_len); 254 + dm_warn("%s: Error while reading the addr length (ret = %d)\n", 255 + ofnode_get_name(node), addr_len); 255 256 return addr_len; 256 
257 } 257 258 258 259 size_len = ofnode_read_simple_size_cells(ofnode_get_parent(node)); 259 260 if (size_len < 0) { 260 - debug("%s: Error while reading the size length: (ret = %d)\n", 261 - ofnode_get_name(node), size_len); 261 + dm_warn("%s: Error while reading the size length: (ret = %d)\n", 262 + ofnode_get_name(node), size_len); 262 263 return size_len; 263 264 } 264 265 265 266 both_len = addr_len + size_len; 266 267 if (!both_len) { 267 - debug("%s: Both addr and size length are zero\n", 268 - ofnode_get_name(node)); 268 + dm_warn("%s: Both addr and size length are zero\n", 269 + ofnode_get_name(node)); 269 270 return -EINVAL; 270 271 } 271 272 272 273 len = ofnode_read_size(node, "reg"); 273 274 if (len < 0) { 274 - debug("%s: Error while reading reg size (ret = %d)\n", 275 - ofnode_get_name(node), len); 275 + dm_warn("%s: Error while reading reg size (ret = %d)\n", 276 + ofnode_get_name(node), len); 276 277 return len; 277 278 } 278 279 len /= sizeof(fdt32_t); 279 280 count = len / both_len; 280 281 if (!count) { 281 - debug("%s: Not enough data in reg property\n", 282 - ofnode_get_name(node)); 282 + dm_warn("%s: Not enough data in reg property\n", 283 + ofnode_get_name(node)); 283 284 return -EINVAL; 284 285 } 285 286 ··· 424 425 void *ptr; 425 426 426 427 if (do_range_check() && range_num >= map->range_count) { 427 - debug("%s: range index %d larger than range count\n", 428 - __func__, range_num); 428 + dm_warn("%s: range index %d larger than range count\n", 429 + __func__, range_num); 429 430 return -ERANGE; 430 431 } 431 432 range = &map->ranges[range_num]; ··· 433 434 offset <<= map->reg_offset_shift; 434 435 if (do_range_check() && 435 436 (offset + val_len > range->size || offset + val_len < offset)) { 436 - debug("%s: offset/size combination invalid\n", __func__); 437 + dm_warn("%s: offset/size combination invalid\n", __func__); 437 438 return -ERANGE; 438 439 } 439 440 ··· 455 456 break; 456 457 #endif 457 458 default: 458 - debug("%s: regmap 
size %zu unknown\n", __func__, val_len); 459 + dm_warn("%s: regmap size %zu unknown\n", __func__, val_len); 459 460 return -EINVAL; 460 461 } 461 462 ··· 564 565 void *ptr; 565 566 566 567 if (range_num >= map->range_count) { 567 - debug("%s: range index %d larger than range count\n", 568 - __func__, range_num); 568 + dm_warn("%s: range index %d larger than range count\n", 569 + __func__, range_num); 569 570 return -ERANGE; 570 571 } 571 572 range = &map->ranges[range_num]; 572 573 573 574 offset <<= map->reg_offset_shift; 574 575 if (offset + val_len > range->size || offset + val_len < offset) { 575 - debug("%s: offset/size combination invalid\n", __func__); 576 + dm_warn("%s: offset/size combination invalid\n", __func__); 576 577 return -ERANGE; 577 578 } 578 579 ··· 594 595 break; 595 596 #endif 596 597 default: 597 - debug("%s: regmap size %zu unknown\n", __func__, val_len); 598 + dm_warn("%s: regmap size %zu unknown\n", __func__, val_len); 598 599 return -EINVAL; 599 600 } 600 601 ··· 630 631 u.v64 = val; 631 632 break; 632 633 default: 633 - debug("%s: regmap size %zu unknown\n", __func__, 634 - (size_t)map->width); 634 + dm_warn("%s: regmap size %zu unknown\n", __func__, 635 + (size_t)map->width); 635 636 return -EINVAL; 636 637 } 637 638
+7 -7
drivers/core/root.c
··· 207 207 err = lists_bind_fdt(parent, node, NULL, NULL, pre_reloc_only); 208 208 if (err && !ret) { 209 209 ret = err; 210 - debug("%s: ret=%d\n", node_name, ret); 210 + dm_warn("%s: ret=%d\n", node_name, ret); 211 211 } 212 212 } 213 213 ··· 248 248 249 249 ret = dm_scan_fdt(pre_reloc_only); 250 250 if (ret) { 251 - debug("dm_scan_fdt() failed: %d\n", ret); 251 + dm_warn("dm_scan_fdt() failed: %d\n", ret); 252 252 return ret; 253 253 } 254 254 ··· 256 256 for (i = 0; i < ARRAY_SIZE(nodes); i++) { 257 257 ret = dm_scan_fdt_ofnode_path(nodes[i], pre_reloc_only); 258 258 if (ret) { 259 - debug("dm_scan_fdt() scan for %s failed: %d\n", 260 - nodes[i], ret); 259 + dm_warn("dm_scan_fdt() scan for %s failed: %d\n", 260 + nodes[i], ret); 261 261 return ret; 262 262 } 263 263 } ··· 320 320 321 321 ret = dm_scan_plat(pre_reloc_only); 322 322 if (ret) { 323 - debug("dm_scan_plat() failed: %d\n", ret); 323 + dm_warn("dm_scan_plat() failed: %d\n", ret); 324 324 return ret; 325 325 } 326 326 327 327 if (CONFIG_IS_ENABLED(OF_REAL)) { 328 328 ret = dm_extended_scan(pre_reloc_only); 329 329 if (ret) { 330 - debug("dm_extended_scan() failed: %d\n", ret); 330 + dm_warn("dm_extended_scan() failed: %d\n", ret); 331 331 return ret; 332 332 } 333 333 } ··· 345 345 346 346 ret = dm_init(CONFIG_IS_ENABLED(OF_LIVE)); 347 347 if (ret) { 348 - debug("dm_init() failed: %d\n", ret); 348 + dm_warn("dm_init() failed: %d\n", ret); 349 349 return ret; 350 350 } 351 351 if (!CONFIG_IS_ENABLED(OF_PLATDATA_INST)) {
+2 -2
drivers/core/uclass.c
··· 59 59 *ucp = NULL; 60 60 uc_drv = lists_uclass_lookup(id); 61 61 if (!uc_drv) { 62 - debug("Cannot find uclass for id %d: please add the UCLASS_DRIVER() declaration for this UCLASS_... id\n", 63 - id); 62 + dm_warn("Cannot find uclass for id %d: please add the UCLASS_DRIVER() declaration for this UCLASS_... id\n", 63 + id); 64 64 /* 65 65 * Use a strange error to make this case easier to find. When 66 66 * a uclass is not available it can prevent driver model from
-4
include/asm-generic/global_data.h
··· 196 196 */ 197 197 struct udevice *dm_root; 198 198 /** 199 - * @dm_root_f: pre-relocation root instance 200 - */ 201 - struct udevice *dm_root_f; 202 - /** 203 199 * @uclass_root_s: 204 200 * head of core tree when uclasses are not in read-only memory. 205 201 *
+1 -1
lib/acpi/acpi_table.c
··· 117 117 memcpy(header->oem_table_id, OEM_TABLE_ID, 8); 118 118 header->oem_revision = OEM_REVISION; 119 119 memcpy(header->creator_id, ASLC_ID, 4); 120 + header->creator_revision = ASL_REVISION; 120 121 } 121 122 122 123 void acpi_align(struct acpi_ctx *ctx) ··· 219 220 220 221 header->revision = acpi_get_table_revision(ACPITAB_DBG2); 221 222 acpi_fill_header(header, "DBG2"); 222 - header->creator_revision = ASL_REVISION; 223 223 224 224 /* One debug device defined */ 225 225 dbg2->devices_offset = sizeof(struct acpi_dbg2_header);
-1
lib/acpi/ssdt.c
··· 23 23 24 24 acpi_fill_header(ssdt, "SSDT"); 25 25 ssdt->revision = acpi_get_table_revision(ACPITAB_SSDT); 26 - ssdt->creator_revision = 1; 27 26 ssdt->length = sizeof(struct acpi_table_header); 28 27 29 28 acpi_inc(ctx, sizeof(struct acpi_table_header));
+1 -2
test/dm/acpi.c
··· 236 236 hdr.length = 0x11; 237 237 hdr.revision = 0x22; 238 238 hdr.checksum = 0x33; 239 - hdr.creator_revision = 0x44; 240 239 acpi_fill_header(&hdr, "ABCD"); 241 240 242 241 ut_asserteq_mem("ABCD", hdr.signature, sizeof(hdr.signature)); ··· 248 247 sizeof(hdr.oem_table_id)); 249 248 ut_asserteq(OEM_REVISION, hdr.oem_revision); 250 249 ut_asserteq_mem(ASLC_ID, hdr.creator_id, sizeof(hdr.creator_id)); 251 - ut_asserteq(0x44, hdr.creator_revision); 250 + ut_asserteq(ASL_REVISION, hdr.creator_revision); 252 251 253 252 return 0; 254 253 }
-1
test/dm/core.c
··· 1006 1006 ut_assertok(uclass_get(UCLASS_TEST, &uc)); 1007 1007 1008 1008 gd->dm_root = NULL; 1009 - gd->dm_root_f = NULL; 1010 1009 memset(&gd->uclass_root, '\0', sizeof(gd->uclass_root)); 1011 1010 1012 1011 ut_asserteq_ptr(NULL, uclass_find(UCLASS_TEST));
+7
tools/binman/binman.rst
··· 711 711 information about what needs to be fixed. See missing-blob-help for the 712 712 message for each tag. 713 713 714 + assume-size: 715 + Sets the assumed size of a blob entry if it is missing. This allows for a 716 + check that the rest of the image fits into the available space, even when 717 + the contents are not available. If the entry is missing, Binman will use 718 + this assumed size for the entry size, including creating a fake file of that 719 + size if requested. 720 + 714 721 no-expanded: 715 722 By default binman substitutes entries with expanded versions if available, 716 723 so that a `u-boot` entry type turns into `u-boot-expanded`, for example. The
+86 -29
tools/binman/entries.rst
··· 470 470 471 471 .. _etype_efi_capsule: 472 472 473 - Entry: capsule: Entry for generating EFI Capsule files 474 - ------------------------------------------------------ 473 + Entry: efi-capsule: Generate EFI capsules 474 + ----------------------------------------- 475 475 476 - The parameters needed for generation of the capsules can be provided 477 - as properties in the entry. 476 + The parameters needed for generation of the capsules can 477 + be provided as properties in the entry. 478 478 479 479 Properties / Entry arguments: 480 480 - image-index: Unique number for identifying corresponding ··· 495 495 file. Mandatory property for generating signed capsules. 496 496 - oem-flags - OEM flags to be passed through capsule header. 497 497 498 - Since this is a subclass of Entry_section, all properties of the parent 499 - class also apply here. Except for the properties stated as mandatory, the 500 - rest of the properties are optional. 498 + Since this is a subclass of Entry_section, all properties of the parent 499 + class also apply here. Except for the properties stated as mandatory, the 500 + rest of the properties are optional. 
501 501 502 502 For more details on the description of the capsule format, and the capsule 503 503 update functionality, refer Section 8.5 and Chapter 23 in the `UEFI ··· 510 510 A typical capsule entry node would then look something like this:: 511 511 512 512 capsule { 513 - type = "efi-capsule"; 514 - image-index = <0x1>; 515 - /* Image GUID for testing capsule update */ 516 - image-guid = SANDBOX_UBOOT_IMAGE_GUID; 517 - hardware-instance = <0x0>; 518 - private-key = "path/to/the/private/key"; 519 - public-key-cert = "path/to/the/public-key-cert"; 520 - oem-flags = <0x8000>; 513 + type = "efi-capsule"; 514 + image-index = <0x1>; 515 + /* Image GUID for testing capsule update */ 516 + image-guid = SANDBOX_UBOOT_IMAGE_GUID; 517 + hardware-instance = <0x0>; 518 + private-key = "path/to/the/private/key"; 519 + public-key-cert = "path/to/the/public-key-cert"; 520 + oem-flags = <0x8000>; 521 521 522 - u-boot { 523 - }; 522 + u-boot { 523 + }; 524 524 }; 525 525 526 526 In the above example, the capsule payload is the U-Boot image. The ··· 534 534 535 535 .. _etype_efi_empty_capsule: 536 536 537 - Entry: efi-empty-capsule: Entry for generating EFI Empty Capsule files 538 - ---------------------------------------------------------------------- 537 + Entry: efi-empty-capsule: Generate EFI empty capsules 538 + ----------------------------------------------------- 539 539 540 540 The parameters needed for generation of the empty capsules can 541 541 be provided as properties in the entry. ··· 551 551 specification`_. For more information on the empty capsule, refer the 552 552 sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_. 
553 553 554 - A typical accept empty capsule entry node would then look something 555 - like this:: 554 + A typical accept empty capsule entry node would then look something like 555 + this:: 556 556 557 557 empty-capsule { 558 - type = "efi-empty-capsule"; 559 - /* GUID of the image being accepted */ 560 - image-type-id = SANDBOX_UBOOT_IMAGE_GUID; 561 - capsule-type = "accept"; 558 + type = "efi-empty-capsule"; 559 + /* GUID of image being accepted */ 560 + image-type-id = SANDBOX_UBOOT_IMAGE_GUID; 561 + capsule-type = "accept"; 562 562 }; 563 563 564 - A typical revert empty capsule entry node would then look something 565 - like this:: 564 + A typical revert empty capsule entry node would then look something like 565 + this:: 566 566 567 567 empty-capsule { 568 - type = "efi-empty-capsule"; 569 - capsule-type = "revert"; 568 + type = "efi-empty-capsule"; 569 + capsule-type = "revert"; 570 570 }; 571 571 572 572 The empty capsules do not have any input payload image. ··· 1521 1521 1522 1522 1523 1523 1524 + .. _etype_nxp_imx8mcst: 1525 + 1526 + Entry: nxp-imx8mcst: NXP i.MX8M CST .cfg file generator and cst invoker 1527 + ----------------------------------------------------------------------- 1528 + 1529 + Properties / Entry arguments: 1530 + - nxp,loader-address - loader address (SPL text base) 1531 + 1532 + 1533 + 1534 + .. _etype_nxp_imx8mimage: 1535 + 1536 + Entry: nxp-imx8mimage: NXP i.MX8M imx8mimage .cfg file generator and mkimage invoker 1537 + ------------------------------------------------------------------------------------ 1538 + 1539 + Properties / Entry arguments: 1540 + - nxp,boot-from - device to boot from (e.g. 'sd') 1541 + - nxp,loader-address - loader address (SPL text base) 1542 + - nxp,rom-version - BootROM version ('2' for i.MX8M Nano and Plus) 1543 + 1544 + 1545 + 1524 1546 .. 
_etype_opensbi: 1525 1547 1526 1548 Entry: opensbi: RISC-V OpenSBI fw_dynamic blob ··· 1929 1951 - content: List of phandles to entries to sign 1930 1952 - keyfile: Filename of file containing key to sign binary with 1931 1953 - sha: Hash function to be used for signing 1954 + - auth-in-place: This is an integer field that contains two pieces 1955 + of information: 1956 + 1957 + - Lower Byte - Remains 0x02 as per our use case 1958 + ( 0x02: Move the authenticated binary back to the header ) 1959 + - Upper Byte - The Host ID of the core owning the firewall 1932 1960 1933 1961 Output files: 1934 1962 - input.<unique_name> - input file passed to openssl ··· 1936 1964 used as the config file) 1937 1965 - cert.<unique_name> - output file generated by openssl (which is 1938 1966 used as the entry contents) 1967 + 1968 + Depending on auth-in-place information in the inputs, we read the 1969 + firewall nodes that describe the configurations of firewall that TIFS 1970 + will be doing after reading the certificate. 1971 + 1972 + The syntax of the firewall nodes are as such:: 1973 + 1974 + firewall-257-0 { 1975 + id = <257>; /* The ID of the firewall being configured */ 1976 + region = <0>; /* Region number to configure */ 1977 + 1978 + control = /* The control register */ 1979 + <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>; 1980 + 1981 + permissions = /* The permission registers */ 1982 + <((FWPRIVID_ALL << FWPRIVID_SHIFT) | 1983 + FWPERM_SECURE_PRIV_RWCD | 1984 + FWPERM_SECURE_USER_RWCD | 1985 + FWPERM_NON_SECURE_PRIV_RWCD | 1986 + FWPERM_NON_SECURE_USER_RWCD)>; 1987 + 1988 + /* More defines can be found in k3-security.h */ 1989 + 1990 + start_address = /* The Start Address of the firewall */ 1991 + <0x0 0x0>; 1992 + end_address = /* The End Address of the firewall */ 1993 + <0xff 0xffffffff>; 1994 + }; 1995 + 1939 1996 1940 1997 openssl signs the provided data, using the TI templated config file and 1941 1998 writes the signature in this entry. 
This allows verification that the
+2 -1
tools/binman/entry.py
··· 315 315 self.overlap = fdt_util.GetBool(self._node, 'overlap') 316 316 if self.overlap: 317 317 self.required_props += ['offset', 'size'] 318 + self.assume_size = fdt_util.GetInt(self._node, 'assume-size', 0) 318 319 319 320 # This is only supported by blobs and sections at present 320 321 self.compress = fdt_util.GetString(self._node, 'compress', 'none') ··· 812 813 as missing 813 814 """ 814 815 print('''Binman Entry Documentation 815 - =========================== 816 + ========================== 816 817 817 818 This file describes the entry types supported by binman. These entry types can 818 819 be placed in an image one by one to build up a final firmware image. It is
+3 -3
tools/binman/entry_test.py
··· 103 103 ent = entry.Entry.Create(None, self.GetNode(), 'missing', 104 104 missing_etype=True) 105 105 self.assertTrue(isinstance(ent, Entry_blob)) 106 - self.assertEquals('missing', ent.etype) 106 + self.assertEqual('missing', ent.etype) 107 107 108 108 def testDecompressData(self): 109 109 """Test the DecompressData() method of the base class""" ··· 111 111 base.compress = 'lz4' 112 112 bintools = {} 113 113 base.comp_bintool = base.AddBintool(bintools, '_testing') 114 - self.assertEquals(tools.get_bytes(0, 1024), base.CompressData(b'abc')) 115 - self.assertEquals(tools.get_bytes(0, 1024), base.DecompressData(b'abc')) 114 + self.assertEqual(tools.get_bytes(0, 1024), base.CompressData(b'abc')) 115 + self.assertEqual(tools.get_bytes(0, 1024), base.DecompressData(b'abc')) 116 116 117 117 def testLookupOffset(self): 118 118 """Test the lookup_offset() method of the base class"""
+6 -1
tools/binman/etype/blob.py
··· 48 48 self.external and (self.optional or self.section.GetAllowMissing())) 49 49 # Allow the file to be missing 50 50 if not self._pathname: 51 + if not fake_size and self.assume_size: 52 + fake_size = self.assume_size 51 53 self._pathname, faked = self.check_fake_fname(self._filename, 52 54 fake_size) 53 55 self.missing = True 54 56 if not faked: 55 - self.SetContents(b'') 57 + content_size = 0 58 + if self.assume_size: # Ensure we get test coverage on next line 59 + content_size = self.assume_size 60 + self.SetContents(tools.get_bytes(0, content_size)) 56 61 return True 57 62 58 63 self.ReadBlobContents()
+20 -20
tools/binman/etype/efi_capsule.py
··· 36 36 be provided as properties in the entry. 37 37 38 38 Properties / Entry arguments: 39 - - image-index: Unique number for identifying corresponding 40 - payload image. Number between 1 and descriptor count, i.e. 41 - the total number of firmware images that can be updated. Mandatory 42 - property. 43 - - image-guid: Image GUID which will be used for identifying the 44 - updatable image on the board. Mandatory property. 45 - - hardware-instance: Optional number for identifying unique 46 - hardware instance of a device in the system. Default value of 0 47 - for images where value is not to be used. 48 - - fw-version: Value of image version that can be put on the capsule 49 - through the Firmware Management Protocol(FMP) header. 50 - - monotonic-count: Count used when signing an image. 51 - - private-key: Path to PEM formatted .key private key file. Mandatory 52 - property for generating signed capsules. 53 - - public-key-cert: Path to PEM formatted .crt public key certificate 54 - file. Mandatory property for generating signed capsules. 55 - - oem-flags - OEM flags to be passed through capsule header. 39 + - image-index: Unique number for identifying corresponding 40 + payload image. Number between 1 and descriptor count, i.e. 41 + the total number of firmware images that can be updated. Mandatory 42 + property. 43 + - image-guid: Image GUID which will be used for identifying the 44 + updatable image on the board. Mandatory property. 45 + - hardware-instance: Optional number for identifying unique 46 + hardware instance of a device in the system. Default value of 0 47 + for images where value is not to be used. 48 + - fw-version: Value of image version that can be put on the capsule 49 + through the Firmware Management Protocol(FMP) header. 50 + - monotonic-count: Count used when signing an image. 51 + - private-key: Path to PEM formatted .key private key file. Mandatory 52 + property for generating signed capsules. 
53 + - public-key-cert: Path to PEM formatted .crt public key certificate 54 + file. Mandatory property for generating signed capsules. 55 + - oem-flags - OEM flags to be passed through capsule header. 56 56 57 57 Since this is a subclass of Entry_section, all properties of the parent 58 58 class also apply here. Except for the properties stated as mandatory, the ··· 66 66 properties in the entry. The payload to be used in the capsule is to be 67 67 provided as a subnode of the capsule entry. 68 68 69 - A typical capsule entry node would then look something like this 69 + A typical capsule entry node would then look something like this:: 70 70 71 - capsule { 71 + capsule { 72 72 type = "efi-capsule"; 73 73 image-index = <0x1>; 74 74 /* Image GUID for testing capsule update */ ··· 80 80 81 81 u-boot { 82 82 }; 83 - }; 83 + }; 84 84 85 85 In the above example, the capsule payload is the U-Boot image. The 86 86 capsule entry would read the contents of the payload and put them
+12 -10
tools/binman/etype/efi_empty_capsule.py
··· 19 19 be provided as properties in the entry. 20 20 21 21 Properties / Entry arguments: 22 - - image-guid: Image GUID which will be used for identifying the 23 - updatable image on the board. Mandatory for accept capsule. 24 - - capsule-type - String to indicate type of capsule to generate. Valid 25 - values are 'accept' and 'revert'. 22 + - image-guid: Image GUID which will be used for identifying the 23 + updatable image on the board. Mandatory for accept capsule. 24 + - capsule-type - String to indicate type of capsule to generate. Valid 25 + values are 'accept' and 'revert'. 26 26 27 27 For more details on the description of the capsule format, and the capsule 28 28 update functionality, refer Section 8.5 and Chapter 23 in the `UEFI 29 29 specification`_. For more information on the empty capsule, refer the 30 30 sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_. 31 31 32 - A typical accept empty capsule entry node would then look something like this 32 + A typical accept empty capsule entry node would then look something like 33 + this:: 33 34 34 - empty-capsule { 35 + empty-capsule { 35 36 type = "efi-empty-capsule"; 36 37 /* GUID of image being accepted */ 37 38 image-type-id = SANDBOX_UBOOT_IMAGE_GUID; 38 39 capsule-type = "accept"; 39 - }; 40 + }; 40 41 41 - A typical revert empty capsule entry node would then look something like this 42 + A typical revert empty capsule entry node would then look something like 43 + this:: 42 44 43 - empty-capsule { 45 + empty-capsule { 44 46 type = "efi-empty-capsule"; 45 47 capsule-type = "revert"; 46 - }; 48 + }; 47 49 48 50 The empty capsules do not have any input payload image. 49 51
+1 -1
tools/binman/etype/intel_descriptor.py
··· 59 59 if self.missing: 60 60 # Return zero offsets so that these entries get placed somewhere 61 61 if self.HasSibling('intel-me'): 62 - info['intel-me'] = [0, None] 62 + info['intel-me'] = [0x1000, None] 63 63 return info 64 64 offset = self.data.find(FD_SIGNATURE) 65 65 if offset == -1:
+23 -22
tools/binman/etype/ti_secure.py
··· 53 53 - keyfile: Filename of file containing key to sign binary with 54 54 - sha: Hash function to be used for signing 55 55 - auth-in-place: This is an integer field that contains two pieces 56 - of information 57 - Lower Byte - Remains 0x02 as per our use case 58 - ( 0x02: Move the authenticated binary back to the header ) 59 - Upper Byte - The Host ID of the core owning the firewall 56 + of information: 57 + 58 + - Lower Byte - Remains 0x02 as per our use case 59 + ( 0x02: Move the authenticated binary back to the header ) 60 + - Upper Byte - The Host ID of the core owning the firewall 60 61 61 62 Output files: 62 63 - input.<unique_name> - input file passed to openssl ··· 69 70 firewall nodes that describe the configurations of firewall that TIFS 70 71 will be doing after reading the certificate. 71 72 72 - The syntax of the firewall nodes are as such: 73 + The syntax of the firewall nodes are as such:: 73 74 74 - firewall-257-0 { 75 - id = <257>; /* The ID of the firewall being configured */ 76 - region = <0>; /* Region number to configure */ 75 + firewall-257-0 { 76 + id = <257>; /* The ID of the firewall being configured */ 77 + region = <0>; /* Region number to configure */ 77 78 78 - control = /* The control register */ 79 - <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>; 79 + control = /* The control register */ 80 + <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>; 80 81 81 - permissions = /* The permission registers */ 82 - <((FWPRIVID_ALL << FWPRIVID_SHIFT) | 83 - FWPERM_SECURE_PRIV_RWCD | 84 - FWPERM_SECURE_USER_RWCD | 85 - FWPERM_NON_SECURE_PRIV_RWCD | 86 - FWPERM_NON_SECURE_USER_RWCD)>; 82 + permissions = /* The permission registers */ 83 + <((FWPRIVID_ALL << FWPRIVID_SHIFT) | 84 + FWPERM_SECURE_PRIV_RWCD | 85 + FWPERM_SECURE_USER_RWCD | 86 + FWPERM_NON_SECURE_PRIV_RWCD | 87 + FWPERM_NON_SECURE_USER_RWCD)>; 87 88 88 - /* More defines can be found in k3-security.h */ 89 + /* More defines can be found in k3-security.h */ 89 90 90 - 
start_address = /* The Start Address of the firewall */ 91 - <0x0 0x0>; 92 - end_address = /* The End Address of the firewall */ 93 - <0xff 0xffffffff>; 94 - }; 91 + start_address = /* The Start Address of the firewall */ 92 + <0x0 0x0>; 93 + end_address = /* The End Address of the firewall */ 94 + <0xff 0xffffffff>; 95 + }; 95 96 96 97 97 98 openssl signs the provided data, using the TI templated config file and
+24 -24
tools/binman/fdt_test.py
··· 44 44 fname = self.GetCompiled('045_prop_test.dts') 45 45 dt = FdtScan(fname) 46 46 node = dt.GetNode('/binman/intel-me') 47 - self.assertEquals('intel-me', node.name) 47 + self.assertEqual('intel-me', node.name) 48 48 val = fdt_util.GetString(node, 'filename') 49 - self.assertEquals(str, type(val)) 50 - self.assertEquals('me.bin', val) 49 + self.assertEqual(str, type(val)) 50 + self.assertEqual('me.bin', val) 51 51 52 52 prop = node.props['intval'] 53 - self.assertEquals(fdt.Type.INT, prop.type) 54 - self.assertEquals(3, fdt_util.GetInt(node, 'intval')) 53 + self.assertEqual(fdt.Type.INT, prop.type) 54 + self.assertEqual(3, fdt_util.GetInt(node, 'intval')) 55 55 56 56 prop = node.props['intarray'] 57 - self.assertEquals(fdt.Type.INT, prop.type) 58 - self.assertEquals(list, type(prop.value)) 59 - self.assertEquals(2, len(prop.value)) 60 - self.assertEquals([5, 6], 57 + self.assertEqual(fdt.Type.INT, prop.type) 58 + self.assertEqual(list, type(prop.value)) 59 + self.assertEqual(2, len(prop.value)) 60 + self.assertEqual([5, 6], 61 61 [fdt_util.fdt32_to_cpu(val) for val in prop.value]) 62 62 63 63 prop = node.props['byteval'] 64 - self.assertEquals(fdt.Type.BYTE, prop.type) 65 - self.assertEquals(chr(8), prop.value) 64 + self.assertEqual(fdt.Type.BYTE, prop.type) 65 + self.assertEqual(chr(8), prop.value) 66 66 67 67 prop = node.props['bytearray'] 68 - self.assertEquals(fdt.Type.BYTE, prop.type) 69 - self.assertEquals(list, type(prop.value)) 70 - self.assertEquals(str, type(prop.value[0])) 71 - self.assertEquals(3, len(prop.value)) 72 - self.assertEquals([chr(1), '#', '4'], prop.value) 68 + self.assertEqual(fdt.Type.BYTE, prop.type) 69 + self.assertEqual(list, type(prop.value)) 70 + self.assertEqual(str, type(prop.value[0])) 71 + self.assertEqual(3, len(prop.value)) 72 + self.assertEqual([chr(1), '#', '4'], prop.value) 73 73 74 74 prop = node.props['longbytearray'] 75 - self.assertEquals(fdt.Type.INT, prop.type) 76 - self.assertEquals(0x090a0b0c, 
fdt_util.GetInt(node, 'longbytearray')) 75 + self.assertEqual(fdt.Type.INT, prop.type) 76 + self.assertEqual(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray')) 77 77 78 78 prop = node.props['stringval'] 79 - self.assertEquals(fdt.Type.STRING, prop.type) 80 - self.assertEquals('message2', fdt_util.GetString(node, 'stringval')) 79 + self.assertEqual(fdt.Type.STRING, prop.type) 80 + self.assertEqual('message2', fdt_util.GetString(node, 'stringval')) 81 81 82 82 prop = node.props['stringarray'] 83 - self.assertEquals(fdt.Type.STRING, prop.type) 84 - self.assertEquals(list, type(prop.value)) 85 - self.assertEquals(3, len(prop.value)) 86 - self.assertEquals(['another', 'multi-word', 'message'], prop.value) 83 + self.assertEqual(fdt.Type.STRING, prop.type) 84 + self.assertEqual(list, type(prop.value)) 85 + self.assertEqual(3, len(prop.value)) 86 + self.assertEqual(['another', 'multi-word', 'message'], prop.value)
+49 -21
tools/binman/ftest.py
··· 2095 2095 dtb.Scan() 2096 2096 props = self._GetPropTree(dtb, ['size', 'uncomp-size']) 2097 2097 orig = self._decompress(data) 2098 - self.assertEquals(COMPRESS_DATA, orig) 2098 + self.assertEqual(COMPRESS_DATA, orig) 2099 2099 2100 2100 # Do a sanity check on various fields 2101 2101 image = control.images['image'] ··· 2809 2809 2810 2810 orig_entry = orig_image.GetEntries()['fdtmap'] 2811 2811 entry = image.GetEntries()['fdtmap'] 2812 - self.assertEquals(orig_entry.offset, entry.offset) 2813 - self.assertEquals(orig_entry.size, entry.size) 2814 - self.assertEquals(orig_entry.image_pos, entry.image_pos) 2812 + self.assertEqual(orig_entry.offset, entry.offset) 2813 + self.assertEqual(orig_entry.size, entry.size) 2814 + self.assertEqual(orig_entry.image_pos, entry.image_pos) 2815 2815 2816 2816 def testReadImageNoHeader(self): 2817 2817 """Test accessing an image's FDT map without an image header""" ··· 3895 3895 mat = re_line.match(line) 3896 3896 vals[mat.group(1)].append(mat.group(2)) 3897 3897 3898 - self.assertEquals('FIT description: test-desc', lines[0]) 3898 + self.assertEqual('FIT description: test-desc', lines[0]) 3899 3899 self.assertIn('Created:', lines[1]) 3900 3900 self.assertIn('Image 0 (kernel)', vals) 3901 3901 self.assertIn('Hash value', vals) ··· 4012 4012 fit_pos, 4013 4013 fdt_util.fdt32_to_cpu(fnode.props['data-position'].value)) 4014 4014 4015 - self.assertEquals(expected_size, len(data)) 4015 + self.assertEqual(expected_size, len(data)) 4016 4016 actual_pos = len(U_BOOT_DATA) + fit_pos 4017 4017 self.assertEqual(U_BOOT_DATA + b'aa', 4018 4018 data[actual_pos:actual_pos + external_data_size]) ··· 4431 4431 props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', 4432 4432 'uncomp-size']) 4433 4433 orig = self._decompress(data) 4434 - self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) 4434 + self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig) 4435 4435 4436 4436 # Do a sanity check on various fields 4437 4437 image = 
control.images['image'] ··· 4475 4475 'uncomp-size']) 4476 4476 orig = self._decompress(data) 4477 4477 4478 - self.assertEquals(COMPRESS_DATA + COMPRESS_DATA + U_BOOT_DATA, orig) 4478 + self.assertEqual(COMPRESS_DATA + COMPRESS_DATA + U_BOOT_DATA, orig) 4479 4479 4480 4480 # Do a sanity check on various fields 4481 4481 image = control.images['image'] ··· 4519 4519 props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', 4520 4520 'uncomp-size']) 4521 4521 orig = self._decompress(data) 4522 - self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) 4522 + self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig) 4523 4523 expected = { 4524 4524 'section/blob:offset': 0, 4525 4525 'section/blob:size': len(COMPRESS_DATA), ··· 4545 4545 props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', 4546 4546 'uncomp-size']) 4547 4547 orig = self._decompress(data) 4548 - self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) 4548 + self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, orig) 4549 4549 expected = { 4550 4550 'section/blob:offset': 0, 4551 4551 'section/blob:size': len(COMPRESS_DATA), ··· 4580 4580 'uncomp-size']) 4581 4581 4582 4582 base = data[len(U_BOOT_DATA):] 4583 - self.assertEquals(U_BOOT_DATA, base[:len(U_BOOT_DATA)]) 4583 + self.assertEqual(U_BOOT_DATA, base[:len(U_BOOT_DATA)]) 4584 4584 rest = base[len(U_BOOT_DATA):] 4585 4585 4586 4586 # Check compressed data ··· 4588 4588 expect1 = bintool.compress(COMPRESS_DATA + U_BOOT_DATA) 4589 4589 data1 = rest[:len(expect1)] 4590 4590 section1 = self._decompress(data1) 4591 - self.assertEquals(expect1, data1) 4592 - self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, section1) 4591 + self.assertEqual(expect1, data1) 4592 + self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, section1) 4593 4593 rest1 = rest[len(expect1):] 4594 4594 4595 4595 expect2 = bintool.compress(COMPRESS_DATA + COMPRESS_DATA) 4596 4596 data2 = rest1[:len(expect2)] 4597 4597 section2 = self._decompress(data2) 4598 - self.assertEquals(expect2, data2) 4599 - 
self.assertEquals(COMPRESS_DATA + COMPRESS_DATA, section2) 4598 + self.assertEqual(expect2, data2) 4599 + self.assertEqual(COMPRESS_DATA + COMPRESS_DATA, section2) 4600 4600 rest2 = rest1[len(expect2):] 4601 4601 4602 4602 expect_size = (len(U_BOOT_DATA) + len(U_BOOT_DATA) + len(expect1) + 4603 4603 len(expect2) + len(U_BOOT_DATA)) 4604 - #self.assertEquals(expect_size, len(data)) 4604 + #self.assertEqual(expect_size, len(data)) 4605 4605 4606 - #self.assertEquals(U_BOOT_DATA, rest2) 4606 + #self.assertEqual(U_BOOT_DATA, rest2) 4607 4607 4608 4608 self.maxDiff = None 4609 4609 expected = { ··· 4695 4695 4696 4696 u_boot = image.GetEntries()['section'].GetEntries()['u-boot'] 4697 4697 4698 - self.assertEquals(U_BOOT_DATA, u_boot.ReadData()) 4698 + self.assertEqual(U_BOOT_DATA, u_boot.ReadData()) 4699 4699 4700 4700 def testTplNoDtb(self): 4701 4701 """Test that an image with tpl/u-boot-tpl-nodtb.bin can be created""" ··· 5526 5526 segments, entry = elf.read_loadable_segments(elf_data) 5527 5527 5528 5528 # We assume there are two segments 5529 - self.assertEquals(2, len(segments)) 5529 + self.assertEqual(2, len(segments)) 5530 5530 5531 5531 atf1 = dtb.GetNode('/images/atf-1') 5532 5532 _, start, data = segments[0] ··· 6107 6107 data = bintool.compress(COMPRESS_DATA) 6108 6108 self.assertNotEqual(COMPRESS_DATA, data) 6109 6109 orig = bintool.decompress(data) 6110 - self.assertEquals(COMPRESS_DATA, orig) 6110 + self.assertEqual(COMPRESS_DATA, orig) 6111 6111 6112 6112 def testCompUtilVersions(self): 6113 6113 """Test tool version of compression algorithms""" ··· 6125 6125 self.assertNotEqual(COMPRESS_DATA, data) 6126 6126 data += tools.get_bytes(0, 64) 6127 6127 orig = bintool.decompress(data) 6128 - self.assertEquals(COMPRESS_DATA, orig) 6128 + self.assertEqual(COMPRESS_DATA, orig) 6129 6129 6130 6130 def testCompressDtbZstd(self): 6131 6131 """Test that zstd compress of device-tree files failed""" ··· 7459 7459 """Test that both accept and revert capsule are not 
specified""" 7460 7460 with self.assertRaises(ValueError) as e: 7461 7461 self._DoReadFile('323_capsule_accept_revert_missing.dts') 7462 + 7463 + def test_assume_size(self): 7464 + """Test handling of the assume-size property for external blob""" 7465 + with self.assertRaises(ValueError) as e: 7466 + self._DoTestFile('326_assume_size.dts', allow_missing=True, 7467 + allow_fake_blobs=True) 7468 + self.assertIn("contents size 0xa (10) exceeds section size 0x9 (9)", 7469 + str(e.exception)) 7470 + 7471 + def test_assume_size_ok(self): 7472 + """Test handling of the assume-size where it fits OK""" 7473 + with test_util.capture_sys_output() as (stdout, stderr): 7474 + self._DoTestFile('327_assume_size_ok.dts', allow_missing=True, 7475 + allow_fake_blobs=True) 7476 + err = stderr.getvalue() 7477 + self.assertRegex( 7478 + err, 7479 + "Image '.*' has faked external blobs and is non-functional: .*") 7480 + 7481 + def test_assume_size_no_fake(self): 7482 + """Test handling of the assume-size where it fits OK""" 7483 + with test_util.capture_sys_output() as (stdout, stderr): 7484 + self._DoTestFile('327_assume_size_ok.dts', allow_missing=True) 7485 + err = stderr.getvalue() 7486 + self.assertRegex( 7487 + err, 7488 + "Image '.*' is missing external blobs and is non-functional: .*") 7489 + 7462 7490 7463 7491 if __name__ == "__main__": 7464 7492 unittest.main()
+16
tools/binman/test/326_assume_size.dts
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + /dts-v1/; 4 + 5 + / { 6 + #address-cells = <1>; 7 + #size-cells = <1>; 8 + 9 + binman { 10 + size = <9>; 11 + blob-ext { 12 + filename = "assume_blob"; 13 + assume-size = <10>; 14 + }; 15 + }; 16 + };
+16
tools/binman/test/327_assume_size_ok.dts
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + /dts-v1/; 4 + 5 + / { 6 + #address-cells = <1>; 7 + #size-cells = <1>; 8 + 9 + binman { 10 + size = <10>; 11 + blob-ext { 12 + filename = "assume_blob"; 13 + assume-size = <10>; 14 + }; 15 + }; 16 + };
+4 -1
tools/buildman/bsettings.py
··· 29 29 settings.read(config_fname) 30 30 31 31 def add_file(data): 32 - settings.readfp(io.StringIO(data)) 32 + settings.read_file(io.StringIO(data)) 33 + 34 + def add_section(name): 35 + settings.add_section(name) 33 36 34 37 def get_items(section): 35 38 """Get the items from a section of the config.
+11 -12
tools/buildman/builder.py
··· 255 255 256 256 def __init__(self, toolchains, base_dir, git_dir, num_threads, num_jobs, 257 257 gnu_make='make', checkout=True, show_unknown=True, step=1, 258 - no_subdirs=False, full_path=False, verbose_build=False, 259 - mrproper=False, per_board_out_dir=False, 260 - config_only=False, squash_config_y=False, 261 - warnings_as_errors=False, work_in_output=False, 262 - test_thread_exceptions=False, adjust_cfg=None, 263 - allow_missing=False, no_lto=False, reproducible_builds=False, 264 - force_build=False, force_build_failures=False, 265 - force_reconfig=False, in_tree=False, 266 - force_config_on_failure=False, make_func=None): 258 + no_subdirs=False, verbose_build=False, 259 + mrproper=False, fallback_mrproper=False, 260 + per_board_out_dir=False, config_only=False, 261 + squash_config_y=False, warnings_as_errors=False, 262 + work_in_output=False, test_thread_exceptions=False, 263 + adjust_cfg=None, allow_missing=False, no_lto=False, 264 + reproducible_builds=False, force_build=False, 265 + force_build_failures=False, force_reconfig=False, 266 + in_tree=False, force_config_on_failure=False, make_func=None): 267 267 """Create a new Builder object 268 268 269 269 Args: ··· 279 279 step: 1 to process every commit, n to process every nth commit 280 280 no_subdirs: Don't create subdirectories when building current 281 281 source for a single board 282 - full_path: Return the full path in CROSS_COMPILE and don't set 283 - PATH 284 282 verbose_build: Run build with V=1 and don't use 'make -s' 285 283 mrproper: Always run 'make mrproper' when configuring 284 + fallback_mrproper: Run 'make mrproper' and retry on build failure 286 285 per_board_out_dir: Build in a separate persistent directory per 287 286 board rather than a thread-specific directory 288 287 config_only: Only configure each build, don't build it ··· 336 335 self._step = step 337 336 self._error_lines = 0 338 337 self.no_subdirs = no_subdirs 339 - self.full_path = full_path 340 338 self.verbose_build = 
verbose_build 341 339 self.config_only = config_only 342 340 self.squash_config_y = squash_config_y ··· 352 350 self.force_reconfig = force_reconfig 353 351 self.in_tree = in_tree 354 352 self.force_config_on_failure = force_config_on_failure 353 + self.fallback_mrproper = fallback_mrproper 355 354 356 355 if not self.squash_config_y: 357 356 self.config_filenames += EXTRA_CONFIG_FILENAMES
+24 -16
tools/buildman/builderthread.py
··· 240 240 return args, cwd, src_dir 241 241 242 242 def _reconfigure(self, commit, brd, cwd, args, env, config_args, config_out, 243 - cmd_list): 243 + cmd_list, mrproper): 244 244 """Reconfigure the build 245 245 246 246 Args: ··· 251 251 env (dict): Environment strings 252 252 config_args (list of str): defconfig arg for this board 253 253 cmd_list (list of str): List to add the commands to, for logging 254 + mrproper (bool): True to run mrproper first 254 255 255 256 Returns: 256 257 CommandResult object 257 258 """ 258 - if self.mrproper: 259 + if mrproper: 259 260 result = self.make(commit, brd, 'mrproper', cwd, 'mrproper', *args, 260 261 env=env) 261 262 config_out.write(result.combined) ··· 380 381 commit = 'current' 381 382 return commit 382 383 383 - def _config_and_build(self, commit_upto, brd, work_dir, do_config, 384 + def _config_and_build(self, commit_upto, brd, work_dir, do_config, mrproper, 384 385 config_only, adjust_cfg, commit, out_dir, out_rel_dir, 385 386 result): 386 387 """Do the build, configuring first if necessary ··· 390 391 brd (Board): Board to create arguments for 391 392 work_dir (str): Directory to which the source will be checked out 392 393 do_config (bool): True to run a make <board>_defconfig on the source 394 + mrproper (bool): True to run mrproper first 393 395 config_only (bool): Only configure the source, do not build it 394 396 adjust_cfg (list of str): See the cfgutil module and run_commit() 395 397 commit (Commit): Commit only being built ··· 404 406 the next incremental build 405 407 """ 406 408 # Set up the environment and command line 407 - env = self.toolchain.MakeEnvironment(self.builder.full_path) 409 + env = self.toolchain.MakeEnvironment() 408 410 mkdir(out_dir) 409 411 410 412 args, cwd, src_dir = self._build_args(brd, out_dir, out_rel_dir, ··· 419 421 cmd_list = [] 420 422 if do_config or adjust_cfg: 421 423 result = self._reconfigure( 422 - commit, brd, cwd, args, env, config_args, config_out, cmd_list) 424 + 
commit, brd, cwd, args, env, config_args, config_out, cmd_list, 425 + mrproper) 423 426 do_config = False # No need to configure next time 424 427 if adjust_cfg: 425 428 cfgutil.adjust_cfg_file(cfg_file, adjust_cfg) ··· 445 448 result.cmd_list = cmd_list 446 449 return result, do_config 447 450 448 - def run_commit(self, commit_upto, brd, work_dir, do_config, config_only, 449 - force_build, force_build_failures, work_in_output, 450 - adjust_cfg): 451 + def run_commit(self, commit_upto, brd, work_dir, do_config, mrproper, 452 + config_only, force_build, force_build_failures, 453 + work_in_output, adjust_cfg): 451 454 """Build a particular commit. 452 455 453 456 If the build is already done, and we are not forcing a build, we skip ··· 458 461 brd (Board): Board to build 459 462 work_dir (str): Directory to which the source will be checked out 460 463 do_config (bool): True to run a make <board>_defconfig on the source 464 + mrproper (bool): True to run mrproper first 461 465 config_only (bool): Only configure the source, do not build it 462 466 force_build (bool): Force a build even if one was previously done 463 467 force_build_failures (bool): Force a bulid if the previous result ··· 498 502 if self.toolchain: 499 503 commit = self._checkout(commit_upto, work_dir) 500 504 result, do_config = self._config_and_build( 501 - commit_upto, brd, work_dir, do_config, config_only, 502 - adjust_cfg, commit, out_dir, out_rel_dir, result) 505 + commit_upto, brd, work_dir, do_config, mrproper, 506 + config_only, adjust_cfg, commit, out_dir, out_rel_dir, 507 + result) 503 508 result.already_done = False 504 509 505 510 result.toolchain = self.toolchain ··· 569 574 outf.write(f'{result.return_code}') 570 575 571 576 # Write out the image and function size information and an objdump 572 - env = result.toolchain.MakeEnvironment(self.builder.full_path) 577 + env = result.toolchain.MakeEnvironment() 573 578 with open(os.path.join(build_dir, 'out-env'), 'wb') as outf: 574 579 for var 
in sorted(env.keys()): 575 580 outf.write(b'%s="%s"' % (var, env[var])) ··· 688 693 force_build = False 689 694 for commit_upto in range(0, len(job.commits), job.step): 690 695 result, request_config = self.run_commit(commit_upto, brd, 691 - work_dir, do_config, self.builder.config_only, 696 + work_dir, do_config, self.mrproper, 697 + self.builder.config_only, 692 698 force_build or self.builder.force_build, 693 699 self.builder.force_build_failures, 694 700 job.work_in_output, job.adjust_cfg) 695 701 failed = result.return_code or result.stderr 696 702 did_config = do_config 697 - if failed and not do_config: 703 + if failed and not do_config and not self.mrproper: 698 704 # If our incremental build failed, try building again 699 705 # with a reconfig. 700 706 if self.builder.force_config_on_failure: 701 707 result, request_config = self.run_commit(commit_upto, 702 - brd, work_dir, True, False, True, False, 703 - job.work_in_output, job.adjust_cfg) 708 + brd, work_dir, True, 709 + self.mrproper or self.builder.fallback_mrproper, 710 + False, True, False, job.work_in_output, 711 + job.adjust_cfg) 704 712 did_config = True 705 713 if not self.builder.force_reconfig: 706 714 do_config = request_config ··· 744 752 else: 745 753 # Just build the currently checked-out build 746 754 result, request_config = self.run_commit(None, brd, work_dir, True, 747 - self.builder.config_only, True, 755 + self.mrproper, self.builder.config_only, True, 748 756 self.builder.force_build_failures, job.work_in_output, 749 757 job.adjust_cfg) 750 758 result.commit_upto = 0
+7 -1
tools/buildman/buildman.rst
··· 995 995 first commit for each board. This reduces the amount of work 'make' does, and 996 996 hence speeds up the build. To force use of 'make mrproper', use the -m flag. 997 997 This flag will slow down any buildman invocation, since it increases the amount 998 - of work done on any build. 998 + of work done on any build. An alternative is to use the --fallback-mrproper 999 + flag, which retries the build with 'make mrproper' only after a build failure. 999 1000 1000 1001 One possible application of buildman is as part of a continual edit, build, 1001 1002 edit, build, ... cycle; repeatedly applying buildman to the same change or ··· 1284 1285 then buildman hangs. Failing to handle any eventuality is a bug in buildman and 1285 1286 should be reported. But you can use -T0 to disable threading and hopefully 1286 1287 figure out the root cause of the build failure. 1288 + 1289 + For situations where buildman is invoked from multiple running processes, it is 1290 + sometimes useful to have buildman wait until the others have finished. Use the 1291 + --process-limit option for this: --process-limit 1 will allow only one buildman 1292 + to process jobs at a time. 1287 1293 1288 1294 Build summary 1289 1295 -------------
+5 -3
tools/buildman/cmdline.py
··· 90 90 parser.add_argument('--list-tool-chains', action='store_true', default=False, 91 91 help='List available tool chains (use -v to see probing detail)') 92 92 parser.add_argument('-m', '--mrproper', action='store_true', 93 - default=False, help="Run 'make mrproper' before reconfiguring") 93 + default=False, help="Run 'make mrproper' before reconfiguring") 94 + parser.add_argument('--fallback-mrproper', action='store_true', 95 + default=False, help="Run 'make mrproper' and retry on build failure") 94 96 parser.add_argument( 95 97 '-M', '--allow-missing', action='store_true', default=False, 96 98 help='Tell binman to allow missing blobs and generate fake ones as needed') ··· 121 123 help="Override host toolchain to use for sandbox (e.g. 'clang-7')") 122 124 parser.add_argument('-Q', '--quick', action='store_true', 123 125 default=False, help='Do a rough build, with limited warning resolution') 124 - parser.add_argument('-p', '--full-path', action='store_true', 125 - default=False, help="Use full toolchain path in CROSS_COMPILE") 126 126 parser.add_argument('-P', '--per-board-out-dir', action='store_true', 127 127 default=False, help="Use an O= (output) directory per board rather than per thread") 128 128 parser.add_argument('--print-arch', action='store_true', 129 129 default=False, help="Print the architecture for a board (ARCH=)") 130 + parser.add_argument('--process-limit', type=int, 131 + default=0, help='Limit to number of buildmans running at once') 130 132 parser.add_argument('-r', '--reproducible-builds', action='store_true', 131 133 help='Set SOURCE_DATE_EPOCH=0 to support a reproducible build') 132 134 parser.add_argument('-R', '--regen-board-list', type=str,
+141 -4
tools/buildman/control.py
··· 7 7 This holds the main control logic for buildman, when not running tests. 8 8 """ 9 9 10 + import getpass 10 11 import multiprocessing 11 12 import os 12 13 import shutil 13 14 import sys 15 + import tempfile 16 + import time 14 17 15 18 from buildman import boards 16 19 from buildman import bsettings ··· 21 24 from patman import patchstream 22 25 from u_boot_pylib import command 23 26 from u_boot_pylib import terminal 24 - from u_boot_pylib.terminal import tprint 27 + from u_boot_pylib import tools 28 + from u_boot_pylib.terminal import print_clear, tprint 25 29 26 30 TEST_BUILDER = None 31 + 32 + # Space-separated list of buildman process IDs currently running jobs 33 + RUNNING_FNAME = f'buildmanq.{getpass.getuser()}' 34 + 35 + # Lock file for access to RUNNING_FILE 36 + LOCK_FNAME = f'{RUNNING_FNAME}.lock' 37 + 38 + # Wait time for access to lock (seconds) 39 + LOCK_WAIT_S = 10 40 + 41 + # Wait time to start running 42 + RUN_WAIT_S = 300 27 43 28 44 def get_plural(count): 29 45 """Returns a plural 's' if count is not 1""" ··· 578 594 return adjust_cfg 579 595 580 596 597 + def read_procs(tmpdir=tempfile.gettempdir()): 598 + """Read the list of running buildman processes 599 + 600 + If the list is corrupted, returns an empty list 601 + 602 + Args: 603 + tmpdir (str): Temporary directory to use (for testing only) 604 + """ 605 + running_fname = os.path.join(tmpdir, RUNNING_FNAME) 606 + procs = [] 607 + if os.path.exists(running_fname): 608 + items = tools.read_file(running_fname, binary=False).split() 609 + try: 610 + procs = [int(x) for x in items] 611 + except ValueError: # Handle invalid format 612 + pass 613 + return procs 614 + 615 + 616 + def check_pid(pid): 617 + """Check for existence of a unix PID 618 + 619 + https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python 620 + 621 + Args: 622 + pid (int): PID to check 623 + 624 + Returns: 625 + True if it exists, else False 626 + """ 627 + try: 628 + 
os.kill(pid, 0) 629 + except OSError: 630 + return False 631 + else: 632 + return True 633 + 634 + 635 + def write_procs(procs, tmpdir=tempfile.gettempdir()): 636 + """Write the list of running buildman processes 637 + 638 + Args: 639 + tmpdir (str): Temporary directory to use (for testing only) 640 + """ 641 + running_fname = os.path.join(tmpdir, RUNNING_FNAME) 642 + tools.write_file(running_fname, ' '.join([str(p) for p in procs]), 643 + binary=False) 644 + 645 + # Allow another user to access the file 646 + os.chmod(running_fname, 0o666) 647 + 648 + def wait_for_process_limit(limit, tmpdir=tempfile.gettempdir(), 649 + pid=os.getpid()): 650 + """Wait until the number of buildman processes drops to the limit 651 + 652 + This uses FileLock to protect a 'running' file, which contains a list of 653 + PIDs of running buildman processes. The number of PIDs in the file indicates 654 + the number of running processes. 655 + 656 + When buildman starts up, it calls this function to wait until it is OK to 657 + start the build. 658 + 659 + On exit, no attempt is made to remove the PID from the file, since other 660 + buildman processes will notice that the PID is no-longer valid, and ignore 661 + it. 662 + 663 + Two timeouts are provided: 664 + LOCK_WAIT_S: length of time to wait for the lock; if this occurs, the 665 + lock is busted / removed before trying again 666 + RUN_WAIT_S: length of time to wait to be allowed to run; if this occurs, 667 + the build starts, with the PID being added to the file. 
668 + 669 + Args: 670 + limit (int): Maximum number of buildman processes, including this one; 671 + must be > 0 672 + tmpdir (str): Temporary directory to use (for testing only) 673 + pid (int): Current process ID (for testing only) 674 + """ 675 + from filelock import Timeout, FileLock 676 + 677 + running_fname = os.path.join(tmpdir, RUNNING_FNAME) 678 + lock_fname = os.path.join(tmpdir, LOCK_FNAME) 679 + lock = FileLock(lock_fname) 680 + 681 + # Allow another user to access the file 682 + col = terminal.Color() 683 + tprint('Waiting for other buildman processes...', newline=False, 684 + colour=col.RED) 685 + 686 + claimed = False 687 + deadline = time.time() + RUN_WAIT_S 688 + while True: 689 + try: 690 + with lock.acquire(timeout=LOCK_WAIT_S): 691 + os.chmod(lock_fname, 0o666) 692 + procs = read_procs(tmpdir) 693 + 694 + # Drop PIDs which are not running 695 + procs = list(filter(check_pid, procs)) 696 + 697 + # If we haven't hit the limit, add ourself 698 + if len(procs) < limit: 699 + tprint('done...', newline=False) 700 + claimed = True 701 + if time.time() >= deadline: 702 + tprint('timeout...', newline=False) 703 + claimed = True 704 + if claimed: 705 + write_procs(procs + [pid], tmpdir) 706 + break 707 + 708 + except Timeout: 709 + tprint('failed to get lock: busting...', newline=False) 710 + os.remove(lock_fname) 711 + 712 + time.sleep(1) 713 + tprint('starting build', newline=False) 714 + print_clear() 715 + 581 716 def do_buildman(args, toolchains=None, make_func=None, brds=None, 582 717 clean_dir=False, test_thread_exceptions=False): 583 718 """The main control code for buildman ··· 653 788 builder = Builder(toolchains, output_dir, git_dir, 654 789 args.threads, args.jobs, checkout=True, 655 790 show_unknown=args.show_unknown, step=args.step, 656 - no_subdirs=args.no_subdirs, full_path=args.full_path, 657 - verbose_build=args.verbose_build, 658 - mrproper=args.mrproper, 791 + no_subdirs=args.no_subdirs, verbose_build=args.verbose_build, 792 + 
mrproper=args.mrproper, fallback_mrproper=args.fallback_mrproper, 659 793 per_board_out_dir=args.per_board_out_dir, 660 794 config_only=args.config_only, 661 795 squash_config_y=not args.preserve_config_y, ··· 675 809 force_config_on_failure=not args.quick, make_func=make_func) 676 810 677 811 TEST_BUILDER = builder 812 + 813 + if args.process_limit: 814 + wait_for_process_limit(args.process_limit) 678 815 679 816 return run_builder(builder, series.commits if series else None, 680 817 brds.get_selected_dict(), args)
+36 -36
tools/buildman/func_test.py
··· 807 807 params, warnings = self._boards.scan_defconfigs(src, src) 808 808 809 809 # We should get two boards 810 - self.assertEquals(2, len(params)) 810 + self.assertEqual(2, len(params)) 811 811 self.assertFalse(warnings) 812 812 first = 0 if params[0]['target'] == 'board0' else 1 813 813 board0 = params[first] 814 814 board2 = params[1 - first] 815 815 816 - self.assertEquals('arm', board0['arch']) 817 - self.assertEquals('armv7', board0['cpu']) 818 - self.assertEquals('-', board0['soc']) 819 - self.assertEquals('Tester', board0['vendor']) 820 - self.assertEquals('ARM Board 0', board0['board']) 821 - self.assertEquals('config0', board0['config']) 822 - self.assertEquals('board0', board0['target']) 816 + self.assertEqual('arm', board0['arch']) 817 + self.assertEqual('armv7', board0['cpu']) 818 + self.assertEqual('-', board0['soc']) 819 + self.assertEqual('Tester', board0['vendor']) 820 + self.assertEqual('ARM Board 0', board0['board']) 821 + self.assertEqual('config0', board0['config']) 822 + self.assertEqual('board0', board0['target']) 823 823 824 - self.assertEquals('powerpc', board2['arch']) 825 - self.assertEquals('ppc', board2['cpu']) 826 - self.assertEquals('mpc85xx', board2['soc']) 827 - self.assertEquals('Tester', board2['vendor']) 828 - self.assertEquals('PowerPC board 1', board2['board']) 829 - self.assertEquals('config2', board2['config']) 830 - self.assertEquals('board2', board2['target']) 824 + self.assertEqual('powerpc', board2['arch']) 825 + self.assertEqual('ppc', board2['cpu']) 826 + self.assertEqual('mpc85xx', board2['soc']) 827 + self.assertEqual('Tester', board2['vendor']) 828 + self.assertEqual('PowerPC board 1', board2['board']) 829 + self.assertEqual('config2', board2['config']) 830 + self.assertEqual('board2', board2['target']) 831 831 832 832 def test_output_is_new(self): 833 833 """Test detecting new changes to Kconfig""" ··· 898 898 params_list, warnings = self._boards.build_board_list(config_dir, src) 899 899 900 900 # There should 
be two boards no warnings 901 - self.assertEquals(2, len(params_list)) 901 + self.assertEqual(2, len(params_list)) 902 902 self.assertFalse(warnings) 903 903 904 904 # Set an invalid status line in the file ··· 907 907 for line in orig_data.splitlines(keepends=True)] 908 908 tools.write_file(main, ''.join(lines), binary=False) 909 909 params_list, warnings = self._boards.build_board_list(config_dir, src) 910 - self.assertEquals(2, len(params_list)) 910 + self.assertEqual(2, len(params_list)) 911 911 params = params_list[0] 912 912 if params['target'] == 'board2': 913 913 params = params_list[1] 914 - self.assertEquals('-', params['status']) 915 - self.assertEquals(["WARNING: Other: unknown status for 'board0'"], 914 + self.assertEqual('-', params['status']) 915 + self.assertEqual(["WARNING: Other: unknown status for 'board0'"], 916 916 warnings) 917 917 918 918 # Remove the status line (S:) from a file ··· 920 920 if not line.startswith('S:')] 921 921 tools.write_file(main, ''.join(lines), binary=False) 922 922 params_list, warnings = self._boards.build_board_list(config_dir, src) 923 - self.assertEquals(2, len(params_list)) 924 - self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings) 923 + self.assertEqual(2, len(params_list)) 924 + self.assertEqual(["WARNING: -: unknown status for 'board0'"], warnings) 925 925 926 926 # Remove the configs/ line (F:) from a file - this is the last line 927 927 data = ''.join(orig_data.splitlines(keepends=True)[:-1]) 928 928 tools.write_file(main, data, binary=False) 929 929 params_list, warnings = self._boards.build_board_list(config_dir, src) 930 - self.assertEquals(2, len(params_list)) 931 - self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings) 930 + self.assertEqual(2, len(params_list)) 931 + self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings) 932 932 933 933 # Mark a board as orphaned - this should give a warning 934 934 lines = ['S: Orphaned' if line.startswith('S') else line 
935 935 for line in orig_data.splitlines(keepends=True)] 936 936 tools.write_file(main, ''.join(lines), binary=False) 937 937 params_list, warnings = self._boards.build_board_list(config_dir, src) 938 - self.assertEquals(2, len(params_list)) 939 - self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings) 938 + self.assertEqual(2, len(params_list)) 939 + self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings) 940 940 941 941 # Change the maintainer to '-' - this should give a warning 942 942 lines = ['M: -' if line.startswith('M') else line 943 943 for line in orig_data.splitlines(keepends=True)] 944 944 tools.write_file(main, ''.join(lines), binary=False) 945 945 params_list, warnings = self._boards.build_board_list(config_dir, src) 946 - self.assertEquals(2, len(params_list)) 947 - self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings) 946 + self.assertEqual(2, len(params_list)) 947 + self.assertEqual(["WARNING: -: unknown status for 'board0'"], warnings) 948 948 949 949 # Remove the maintainer line (M:) from a file 950 950 lines = [line for line in orig_data.splitlines(keepends=True) 951 951 if not line.startswith('M:')] 952 952 tools.write_file(main, ''.join(lines), binary=False) 953 953 params_list, warnings = self._boards.build_board_list(config_dir, src) 954 - self.assertEquals(2, len(params_list)) 955 - self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings) 954 + self.assertEqual(2, len(params_list)) 955 + self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings) 956 956 957 957 # Move the contents of the second file into this one, removing the 958 958 # second file, to check multiple records in a single file. 
··· 960 960 tools.write_file(main, both_data, binary=False) 961 961 os.remove(other) 962 962 params_list, warnings = self._boards.build_board_list(config_dir, src) 963 - self.assertEquals(2, len(params_list)) 963 + self.assertEqual(2, len(params_list)) 964 964 self.assertFalse(warnings) 965 965 966 966 # Add another record, this should be ignored with a warning 967 967 extra = '\n\nAnother\nM: Fred\nF: configs/board9_defconfig\nS: other\n' 968 968 tools.write_file(main, both_data + extra, binary=False) 969 969 params_list, warnings = self._boards.build_board_list(config_dir, src) 970 - self.assertEquals(2, len(params_list)) 970 + self.assertEqual(2, len(params_list)) 971 971 self.assertFalse(warnings) 972 972 973 973 # Add another TARGET to the Kconfig ··· 983 983 tools.write_file(kc_file, orig_kc_data + extra) 984 984 params_list, warnings = self._boards.build_board_list(config_dir, src, 985 985 warn_targets=True) 986 - self.assertEquals(2, len(params_list)) 987 - self.assertEquals( 986 + self.assertEqual(2, len(params_list)) 987 + self.assertEqual( 988 988 ['WARNING: board2_defconfig: Duplicate TARGET_xxx: board2 and other'], 989 989 warnings) 990 990 ··· 994 994 tools.write_file(kc_file, b''.join(lines)) 995 995 params_list, warnings = self._boards.build_board_list(config_dir, src, 996 996 warn_targets=True) 997 - self.assertEquals(2, len(params_list)) 998 - self.assertEquals( 997 + self.assertEqual(2, len(params_list)) 998 + self.assertEqual( 999 999 ['WARNING: board2_defconfig: No TARGET_BOARD2 enabled'], 1000 1000 warnings) 1001 1001 tools.write_file(kc_file, orig_kc_data) ··· 1004 1004 data = ''.join(both_data.splitlines(keepends=True)[:-1]) 1005 1005 tools.write_file(main, data + 'N: oa.*2\n', binary=False) 1006 1006 params_list, warnings = self._boards.build_board_list(config_dir, src) 1007 - self.assertEquals(2, len(params_list)) 1007 + self.assertEqual(2, len(params_list)) 1008 1008 self.assertFalse(warnings) 1009 1009 1010 1010 def testRegenBoards(self):
+5 -1
tools/buildman/pyproject.toml
··· 8 8 authors = [ 9 9 { name="Simon Glass", email="sjg@chromium.org" }, 10 10 ] 11 - dependencies = ["u_boot_pylib >= 0.0.6", "patch-manager >= 0.0.6"] 11 + dependencies = [ 12 + "filelock >= 3.0.12", 13 + "u_boot_pylib >= 0.0.6", 14 + "patch-manager >= 0.0.6" 15 + ] 12 16 description = "Buildman build tool for U-Boot" 13 17 readme = "README.rst" 14 18 requires-python = ">=3.7"
+2
tools/buildman/requirements.txt
··· 1 + coverage==6.2 1 2 jsonschema==4.17.3 3 + pycryptodome==3.20 2 4 pyyaml==6.0 3 5 yamllint==1.26.3
+197 -1
tools/buildman/test.py
··· 2 2 # Copyright (c) 2012 The Chromium OS Authors. 3 3 # 4 4 5 + from filelock import FileLock 5 6 import os 6 7 import shutil 7 8 import sys 8 9 import tempfile 9 10 import time 10 11 import unittest 12 + from unittest.mock import patch 11 13 12 14 from buildman import board 13 15 from buildman import boards ··· 146 148 self.toolchains.Add('arm-linux-gcc', test=False) 147 149 self.toolchains.Add('sparc-linux-gcc', test=False) 148 150 self.toolchains.Add('powerpc-linux-gcc', test=False) 151 + self.toolchains.Add('/path/to/aarch64-linux-gcc', test=False) 149 152 self.toolchains.Add('gcc', test=False) 150 153 151 154 # Avoid sending any output ··· 155 158 self.base_dir = tempfile.mkdtemp() 156 159 if not os.path.isdir(self.base_dir): 157 160 os.mkdir(self.base_dir) 161 + 162 + self.cur_time = 0 163 + self.valid_pids = [] 164 + self.finish_time = None 165 + self.finish_pid = None 158 166 159 167 def tearDown(self): 160 168 shutil.rmtree(self.base_dir) ··· 584 592 if use_network: 585 593 with test_util.capture_sys_output() as (stdout, stderr): 586 594 url = self.toolchains.LocateArchUrl('arm') 587 - self.assertRegexpMatches(url, 'https://www.kernel.org/pub/tools/' 595 + self.assertRegex(url, 'https://www.kernel.org/pub/tools/' 588 596 'crosstool/files/bin/x86_64/.*/' 589 597 'x86_64-gcc-.*-nolibc[-_]arm-.*linux-gnueabi.tar.xz') 590 598 ··· 747 755 self.assertEqual([ 748 756 ['MARY="mary"', 'Missing expected line: CONFIG_MARY="mary"']], result) 749 757 758 + def get_procs(self): 759 + running_fname = os.path.join(self.base_dir, control.RUNNING_FNAME) 760 + items = tools.read_file(running_fname, binary=False).split() 761 + return [int(x) for x in items] 762 + 763 + def get_time(self): 764 + return self.cur_time 765 + 766 + def inc_time(self, amount): 767 + self.cur_time += amount 768 + 769 + # Handle a process exiting 770 + if self.finish_time == self.cur_time: 771 + self.valid_pids = [pid for pid in self.valid_pids 772 + if pid != self.finish_pid] 773 + 774 + def 
kill(self, pid, signal): 775 + if pid not in self.valid_pids: 776 + raise OSError('Invalid PID') 777 + 778 + def test_process_limit(self): 779 + """Test wait_for_process_limit() function""" 780 + tmpdir = self.base_dir 781 + 782 + with (patch('time.time', side_effect=self.get_time), 783 + patch('time.sleep', side_effect=self.inc_time), 784 + patch('os.kill', side_effect=self.kill)): 785 + # Grab the process. Since there is no other profcess, this should 786 + # immediately succeed 787 + control.wait_for_process_limit(1, tmpdir=tmpdir, pid=1) 788 + lines = terminal.get_print_test_lines() 789 + self.assertEqual(0, self.cur_time) 790 + self.assertEqual('Waiting for other buildman processes...', 791 + lines[0].text) 792 + self.assertEqual(self._col.RED, lines[0].colour) 793 + self.assertEqual(False, lines[0].newline) 794 + self.assertEqual(True, lines[0].bright) 795 + 796 + self.assertEqual('done...', lines[1].text) 797 + self.assertEqual(None, lines[1].colour) 798 + self.assertEqual(False, lines[1].newline) 799 + self.assertEqual(True, lines[1].bright) 800 + 801 + self.assertEqual('starting build', lines[2].text) 802 + self.assertEqual([1], control.read_procs(tmpdir)) 803 + self.assertEqual(None, lines[2].colour) 804 + self.assertEqual(False, lines[2].newline) 805 + self.assertEqual(True, lines[2].bright) 806 + 807 + # Try again, with a different PID...this should eventually timeout 808 + # and start the build anyway 809 + self.cur_time = 0 810 + self.valid_pids = [1] 811 + control.wait_for_process_limit(1, tmpdir=tmpdir, pid=2) 812 + lines = terminal.get_print_test_lines() 813 + self.assertEqual('Waiting for other buildman processes...', 814 + lines[0].text) 815 + self.assertEqual('timeout...', lines[1].text) 816 + self.assertEqual(None, lines[1].colour) 817 + self.assertEqual(False, lines[1].newline) 818 + self.assertEqual(True, lines[1].bright) 819 + self.assertEqual('starting build', lines[2].text) 820 + self.assertEqual([1, 2], control.read_procs(tmpdir)) 821 + 
self.assertEqual(control.RUN_WAIT_S, self.cur_time) 822 + 823 + # Check lock-busting 824 + self.cur_time = 0 825 + self.valid_pids = [1, 2] 826 + lock_fname = os.path.join(tmpdir, control.LOCK_FNAME) 827 + lock = FileLock(lock_fname) 828 + lock.acquire(timeout=1) 829 + control.wait_for_process_limit(1, tmpdir=tmpdir, pid=3) 830 + lines = terminal.get_print_test_lines() 831 + self.assertEqual('Waiting for other buildman processes...', 832 + lines[0].text) 833 + self.assertEqual('failed to get lock: busting...', lines[1].text) 834 + self.assertEqual(None, lines[1].colour) 835 + self.assertEqual(False, lines[1].newline) 836 + self.assertEqual(True, lines[1].bright) 837 + self.assertEqual('timeout...', lines[2].text) 838 + self.assertEqual('starting build', lines[3].text) 839 + self.assertEqual([1, 2, 3], control.read_procs(tmpdir)) 840 + self.assertEqual(control.RUN_WAIT_S, self.cur_time) 841 + lock.release() 842 + 843 + # Check handling of dead processes. Here we have PID 2 as a running 844 + # process, even though the PID file contains 1, 2 and 3. So we can 845 + # add one more PID, to make 2 and 4 846 + self.cur_time = 0 847 + self.valid_pids = [2] 848 + control.wait_for_process_limit(2, tmpdir=tmpdir, pid=4) 849 + lines = terminal.get_print_test_lines() 850 + self.assertEqual('Waiting for other buildman processes...', 851 + lines[0].text) 852 + self.assertEqual('done...', lines[1].text) 853 + self.assertEqual('starting build', lines[2].text) 854 + self.assertEqual([2, 4], control.read_procs(tmpdir)) 855 + self.assertEqual(0, self.cur_time) 856 + 857 + # Try again, with PID 2 quitting at time 50. 
This allows the new 858 + # build to start 859 + self.cur_time = 0 860 + self.valid_pids = [2, 4] 861 + self.finish_pid = 2 862 + self.finish_time = 50 863 + control.wait_for_process_limit(2, tmpdir=tmpdir, pid=5) 864 + lines = terminal.get_print_test_lines() 865 + self.assertEqual('Waiting for other buildman processes...', 866 + lines[0].text) 867 + self.assertEqual('done...', lines[1].text) 868 + self.assertEqual('starting build', lines[2].text) 869 + self.assertEqual([4, 5], control.read_procs(tmpdir)) 870 + self.assertEqual(self.finish_time, self.cur_time) 871 + 872 + def call_make_environment(self, tchn, in_env=None): 873 + """Call Toolchain.MakeEnvironment() and process the result 874 + 875 + Args: 876 + tchn (Toolchain): Toolchain to use 877 + in_env (dict): Input environment to use, None to use current env 878 + 879 + Returns: 880 + tuple: 881 + dict: Changes that MakeEnvironment has made to the environment 882 + key: Environment variable that was changed 883 + value: New value (for PATH this only includes components 884 + which were added) 885 + str: Full value of the new PATH variable 886 + """ 887 + env = tchn.MakeEnvironment(env=in_env) 888 + 889 + # Get the original environment 890 + orig_env = dict(os.environb if in_env is None else in_env) 891 + orig_path = orig_env[b'PATH'].split(b':') 892 + 893 + # Find new variables 894 + diff = dict((k, env[k]) for k in env if orig_env.get(k) != env[k]) 895 + 896 + # Find new / different path components 897 + diff_path = None 898 + new_path = None 899 + if b'PATH' in diff: 900 + new_path = diff[b'PATH'].split(b':') 901 + diff_paths = [p for p in new_path if p not in orig_path] 902 + diff_path = b':'.join(p for p in new_path if p not in orig_path) 903 + if diff_path: 904 + diff[b'PATH'] = diff_path 905 + else: 906 + del diff[b'PATH'] 907 + return diff, new_path 908 + 909 + def test_toolchain_env(self): 910 + """Test PATH and other environment settings for toolchains""" 911 + # Use a toolchain which has a path 912 
+ tchn = self.toolchains.Select('aarch64') 913 + 914 + # Normal case 915 + diff = self.call_make_environment(tchn)[0] 916 + self.assertEqual( 917 + {b'CROSS_COMPILE': b'/path/to/aarch64-linux-', b'LC_ALL': b'C'}, 918 + diff) 919 + 920 + # When overriding the toolchain, only LC_ALL should be set 921 + tchn.override_toolchain = True 922 + diff = self.call_make_environment(tchn)[0] 923 + self.assertEqual({b'LC_ALL': b'C'}, diff) 924 + 925 + # Test that virtualenv is handled correctly 926 + tchn.override_toolchain = False 927 + sys.prefix = '/some/venv' 928 + env = dict(os.environb) 929 + env[b'PATH'] = b'/some/venv/bin:other/things' 930 + tchn.path = '/my/path' 931 + diff, diff_path = self.call_make_environment(tchn, env) 932 + 933 + self.assertNotIn(b'PATH', diff) 934 + self.assertEqual(None, diff_path) 935 + self.assertEqual( 936 + {b'CROSS_COMPILE': b'/my/path/aarch64-linux-', b'LC_ALL': b'C'}, 937 + diff) 938 + 939 + # Handle a toolchain wrapper 940 + tchn.path = '' 941 + bsettings.add_section('toolchain-wrapper') 942 + bsettings.set_item('toolchain-wrapper', 'my-wrapper', 'fred') 943 + diff = self.call_make_environment(tchn)[0] 944 + self.assertEqual( 945 + {b'CROSS_COMPILE': b'fred aarch64-linux-', b'LC_ALL': b'C'}, diff) 750 946 751 947 if __name__ == "__main__": 752 948 unittest.main()
+11 -13
tools/buildman/toolchain.py
··· 90 90 if self.arch == 'sandbox' and override_toolchain: 91 91 self.gcc = override_toolchain 92 92 93 - env = self.MakeEnvironment(False) 93 + env = self.MakeEnvironment() 94 94 95 95 # As a basic sanity check, run the C compiler with --version 96 96 cmd = [fname, '--version'] ··· 172 172 else: 173 173 raise ValueError('Unknown arg to GetEnvArgs (%d)' % which) 174 174 175 - def MakeEnvironment(self, full_path): 175 + def MakeEnvironment(self, env=None): 176 176 """Returns an environment for using the toolchain. 177 177 178 - Thie takes the current environment and adds CROSS_COMPILE so that 178 + This takes the current environment and adds CROSS_COMPILE so that 179 179 the tool chain will operate correctly. This also disables localized 180 - output and possibly unicode encoded output of all build tools by 180 + output and possibly Unicode encoded output of all build tools by 181 181 adding LC_ALL=C. 182 182 183 183 Note that os.environb is used to obtain the environment, since in some ··· 188 188 569-570: surrogates not allowed 189 189 190 190 Args: 191 - full_path: Return the full path in CROSS_COMPILE and don't set 192 - PATH 191 + env (dict of bytes): Original environment, used for testing 192 + 193 193 Returns: 194 194 Dict containing the (bytes) environment to use. This is based on the 195 - current environment, with changes as needed to CROSS_COMPILE, PATH 196 - and LC_ALL. 195 + current environment, with changes as needed to CROSS_COMPILE and 196 + LC_ALL. 
197 197 """ 198 - env = dict(os.environb) 198 + env = dict(env or os.environb) 199 + 199 200 wrapper = self.GetWrapper() 200 201 201 202 if self.override_toolchain: 202 203 # We'll use MakeArgs() to provide this 203 204 pass 204 - elif full_path: 205 + else: 205 206 env[b'CROSS_COMPILE'] = tools.to_bytes( 206 207 wrapper + os.path.join(self.path, self.cross)) 207 - else: 208 - env[b'CROSS_COMPILE'] = tools.to_bytes(wrapper + self.cross) 209 - env[b'PATH'] = tools.to_bytes(self.path) + b':' + env[b'PATH'] 210 208 211 209 env[b'LC_ALL'] = b'C' 212 210
+8 -2
tools/patman/func_test.py
··· 211 211 'u-boot': ['u-boot@lists.denx.de'], 212 212 'simon': [self.leb], 213 213 'fred': [self.fred], 214 + 'joe': [self.joe], 214 215 } 215 216 216 217 text = self._get_text('test01.txt') ··· 259 260 self.assertEqual('Postfix:\t some-branch', next(lines)) 260 261 self.assertEqual('Cover: 4 lines', next(lines)) 261 262 self.assertEqual(' Cc: %s' % self.fred, next(lines)) 263 + self.assertEqual(' Cc: %s' % self.joe, next(lines)) 262 264 self.assertEqual(' Cc: %s' % self.leb, 263 265 next(lines)) 264 266 self.assertEqual(' Cc: %s' % mel, next(lines)) ··· 272 274 273 275 self.assertEqual(('%s %s\0%s' % (args[0], rick, stefan)), cc_lines[0]) 274 276 self.assertEqual( 275 - '%s %s\0%s\0%s\0%s' % (args[1], self.fred, self.leb, rick, stefan), 277 + '%s %s\0%s\0%s\0%s\0%s' % (args[1], self.fred, self.joe, self.leb, 278 + rick, stefan), 276 279 cc_lines[1]) 277 280 278 281 expected = ''' ··· 290 293 change 291 294 - Some changes 292 295 - Some notes for the cover letter 296 + - fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base() 293 297 294 298 Simon Glass (2): 295 299 pci: Correct cast for sandbox ··· 339 343 - Multi 340 344 line 341 345 change 346 + - New 342 347 - Some changes 343 348 344 349 Changes in v2: ··· 540 545 with open('.patman', 'w', buffering=1) as f: 541 546 f.write('[settings]\n' 542 547 'get_maintainer_script: dummy-script.sh\n' 543 - 'check_patch: False\n') 548 + 'check_patch: False\n' 549 + 'add_maintainers: True\n') 544 550 with open('dummy-script.sh', 'w', buffering=1) as f: 545 551 f.write('#!/usr/bin/env python\n' 546 552 'print("hello@there.com")\n')
+7
tools/patman/patchstream.py
··· 475 475 elif name == 'changes': 476 476 self.in_change = 'Commit' 477 477 self.change_version = self._parse_version(value, line) 478 + elif name == 'cc': 479 + self.commit.add_cc(value.split(',')) 480 + elif name == 'added-in': 481 + version = self._parse_version(value, line) 482 + self.commit.add_change(version, '- New') 483 + self.series.AddChange(version, None, '- %s' % 484 + self.commit.subject) 478 485 else: 479 486 self._add_warn('Line %d: Ignoring Commit-%s' % 480 487 (self.linenum, name))
+14 -1
tools/patman/patman.rst
··· 350 350 - This line will only appear in the cover letter 351 351 <blank line> 352 352 353 - Patch-cc: Their Name <email> 353 + Commit-added-in: n 354 + Add a change noting the version this commit was added in. This is 355 + equivalent to:: 356 + 357 + Commit-changes: n 358 + - New 359 + 360 + Cover-changes: n 361 + - <commit subject> 362 + 363 + It is a convenient shorthand for suppressing the '(no changes in vN)' 364 + message. 365 + 366 + Patch-cc / Commit-cc: Their Name <email> 354 367 This copies a single patch to another email address. Note that the 355 368 Cc: used by git send-email is ignored by patman, but will be 356 369 interpreted by git send-email if you use it.
+4 -4
tools/patman/settings.py
··· 59 59 60 60 # Check to make sure that bogus project gets general alias. 61 61 >>> config = _ProjectConfigParser("zzz") 62 - >>> config.readfp(StringIO(sample_config)) 62 + >>> config.read_file(StringIO(sample_config)) 63 63 >>> str(config.get("alias", "enemies")) 64 64 'Evil <evil@example.com>' 65 65 66 66 # Check to make sure that alias gets overridden by project. 67 67 >>> config = _ProjectConfigParser("sm") 68 - >>> config.readfp(StringIO(sample_config)) 68 + >>> config.read_file(StringIO(sample_config)) 69 69 >>> str(config.get("alias", "enemies")) 70 70 'Green G. <ugly@example.com>' 71 71 72 72 # Check to make sure that settings get merged with project. 73 73 >>> config = _ProjectConfigParser("linux") 74 - >>> config.readfp(StringIO(sample_config)) 74 + >>> config.read_file(StringIO(sample_config)) 75 75 >>> sorted((str(a), str(b)) for (a, b) in config.items("settings")) 76 76 [('am_hero', 'True'), ('check_patch_use_tree', 'True'), ('process_tags', 'False')] 77 77 78 78 # Check to make sure that settings works with unknown project. 79 79 >>> config = _ProjectConfigParser("unknown") 80 - >>> config.readfp(StringIO(sample_config)) 80 + >>> config.read_file(StringIO(sample_config)) 81 81 >>> sorted((str(a), str(b)) for (a, b) in config.items("settings")) 82 82 [('am_hero', 'True')] 83 83 """
+2
tools/patman/test/0002-fdt-Correct-cast-for-sandbox-in-fdtdec_setup_mem_siz.patch
··· 21 21 Cover-letter-cc: Lord Mëlchett <clergy@palace.gov> 22 22 Series-version: 3 23 23 Patch-cc: fred 24 + Commit-cc: joe 24 25 Series-process-log: sort, uniq 26 + Commit-added-in: 4 25 27 Series-changes: 4 26 28 - Some changes 27 29 - Multi
+2
tools/patman/test/test01.txt
··· 49 49 Cover-letter-cc: Lord Mëlchett <clergy@palace.gov> 50 50 Series-version: 3 51 51 Patch-cc: fred 52 + Commit-cc: joe 52 53 Series-process-log: sort, uniq 54 + Commit-added-in: 4 53 55 Series-changes: 4 54 56 - Some changes 55 57 - Multi
+5 -2
tools/u_boot_pylib/terminal.py
··· 164 164 global last_print_len 165 165 166 166 if last_print_len: 167 - print('\r%s\r' % (' '* last_print_len), end='', flush=True) 168 - last_print_len = None 167 + if print_test_mode: 168 + print_test_list.append(PrintLine(None, None, None, None)) 169 + else: 170 + print('\r%s\r' % (' '* last_print_len), end='', flush=True) 171 + last_print_len = None 169 172 170 173 def set_print_test_mode(enable=True): 171 174 """Go into test mode, where all printing is recorded"""
+8 -3
tools/u_boot_pylib/test_util.py
··· 60 60 prefix = '' 61 61 if build_dir: 62 62 prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir 63 - cmd = ('%spython3-coverage run ' 64 - '--omit "%s" %s %s %s %s' % (prefix, ','.join(glob_list), 63 + 64 + # Detect a Python virtualenv and use 'coverage' instead 65 + covtool = ('python3-coverage' if sys.prefix == sys.base_prefix else 66 + 'coverage') 67 + 68 + cmd = ('%s%s run ' 69 + '--omit "%s" %s %s %s %s' % (prefix, covtool, ','.join(glob_list), 65 70 prog, extra_args or '', test_cmd, 66 71 single_thread or '-P1')) 67 72 os.system(cmd) 68 - stdout = command.output('python3-coverage', 'report') 73 + stdout = command.output(covtool, 'report') 69 74 lines = stdout.splitlines() 70 75 if required: 71 76 # Convert '/path/to/name.py' just the module name 'name'