···230230231231 oftree_reset();
232232233233- /* Save the pre-reloc driver model and start a new one */
234234- gd->dm_root_f = gd->dm_root;
233233+ /* Drop the pre-reloc driver model and start a new one */
235234 gd->dm_root = NULL;
236235#ifdef CONFIG_TIMER
237236 gd->timer = NULL;
···5656 out - it will do nothing when called.
57575858config SPL_DM_WARN
5959- bool "Enable warnings in driver model wuth SPL"
5959+ bool "Enable warnings in driver model in SPL"
6060 depends on SPL_DM
6161 help
6262 Enable this to see warnings related to driver model in SPL
+1-1
drivers/core/device.c
···58585959 ret = uclass_get(drv->id, &uc);
6060 if (ret) {
6161- debug("Missing uclass for driver %s\n", drv->name);
6161+ dm_warn("Missing uclass for driver %s\n", drv->name);
6262 return ret;
6363 }
6464
+4-3
drivers/core/fdtaddr.c
···1515#include <asm/global_data.h>
1616#include <asm/io.h>
1717#include <dm/device-internal.h>
1818+#include <dm/util.h>
18191920DECLARE_GLOBAL_DATA_PTR;
2021···32333334 na = fdt_address_cells(gd->fdt_blob, parent);
3435 if (na < 1) {
3535- debug("bad #address-cells\n");
3636+ dm_warn("bad #address-cells\n");
3637 return FDT_ADDR_T_NONE;
3738 }
38393940 ns = fdt_size_cells(gd->fdt_blob, parent);
4041 if (ns < 0) {
4141- debug("bad #size-cells\n");
4242+ dm_warn("bad #size-cells\n");
4243 return FDT_ADDR_T_NONE;
4344 }
44454546 reg = fdt_getprop(gd->fdt_blob, offset, "reg", &len);
4647 if (!reg || (len <= (index * sizeof(fdt32_t) * (na + ns)))) {
4747- debug("Req index out of range\n");
4848+ dm_warn("Req index out of range\n");
4849 return FDT_ADDR_T_NONE;
4950 }
5051
+3-4
drivers/core/lists.c
···144144145145 drv = lists_driver_lookup_name(drv_name);
146146 if (!drv) {
147147- debug("Cannot find driver '%s'\n", drv_name);
147147+ dm_warn("Cannot find driver '%s'\n", drv_name);
148148 return -ENOENT;
149149 }
150150 ret = device_bind_with_driver_data(parent, drv, dev_name, 0 /* data */,
···246246 }
247247248248 if (entry->of_match)
249249- log_debug(" - found match at '%s': '%s' matches '%s'\n",
250250- entry->name, entry->of_match->compatible,
251251- id->compatible);
249249+ log_debug(" - found match at driver '%s' for '%s'\n",
250250+ entry->name, id->compatible);
252251 ret = device_bind_with_driver_data(parent, entry, name,
253252 id ? id->data : 0, node,
254253 &dev);
···1717#include <asm/io.h>
1818#include <dm/of_addr.h>
1919#include <dm/devres.h>
2020+#include <dm/util.h>
2021#include <linux/ioport.h>
2122#include <linux/compat.h>
2223#include <linux/err.h>
···139140 ret = of_address_to_resource(ofnode_to_np(node),
140141 index, &r);
141142 if (ret) {
142142- debug("%s: Could not read resource of range %d (ret = %d)\n",
143143- ofnode_get_name(node), index, ret);
143143+ dm_warn("%s: Could not read resource of range %d (ret = %d)\n",
144144+ ofnode_get_name(node), index, ret);
144145 return ret;
145146 }
146147···154155 addr_len, size_len,
155156 &sz, true);
156157 if (range->start == FDT_ADDR_T_NONE) {
157157- debug("%s: Could not read start of range %d\n",
158158- ofnode_get_name(node), index);
158158+ dm_warn("%s: Could not read start of range %d\n",
159159+ ofnode_get_name(node), index);
159160 return -EINVAL;
160161 }
161162···173174174175 addr_len = ofnode_read_simple_addr_cells(ofnode_get_parent(node));
175176 if (addr_len < 0) {
176176- debug("%s: Error while reading the addr length (ret = %d)\n",
177177- ofnode_get_name(node), addr_len);
177177+ dm_warn("%s: Error while reading the addr length (ret = %d)\n",
178178+ ofnode_get_name(node), addr_len);
178179 return addr_len;
179180 }
180181181182 size_len = ofnode_read_simple_size_cells(ofnode_get_parent(node));
182183 if (size_len < 0) {
183183- debug("%s: Error while reading the size length: (ret = %d)\n",
184184- ofnode_get_name(node), size_len);
184184+ dm_warn("%s: Error while reading the size length: (ret = %d)\n",
185185+ ofnode_get_name(node), size_len);
185186 return size_len;
186187 }
187188···250251251252 addr_len = ofnode_read_simple_addr_cells(ofnode_get_parent(node));
252253 if (addr_len < 0) {
253253- debug("%s: Error while reading the addr length (ret = %d)\n",
254254- ofnode_get_name(node), addr_len);
254254+ dm_warn("%s: Error while reading the addr length (ret = %d)\n",
255255+ ofnode_get_name(node), addr_len);
255256 return addr_len;
256257 }
257258258259 size_len = ofnode_read_simple_size_cells(ofnode_get_parent(node));
259260 if (size_len < 0) {
260260- debug("%s: Error while reading the size length: (ret = %d)\n",
261261- ofnode_get_name(node), size_len);
261261+ dm_warn("%s: Error while reading the size length: (ret = %d)\n",
262262+ ofnode_get_name(node), size_len);
262263 return size_len;
263264 }
264265265266 both_len = addr_len + size_len;
266267 if (!both_len) {
267267- debug("%s: Both addr and size length are zero\n",
268268- ofnode_get_name(node));
268268+ dm_warn("%s: Both addr and size length are zero\n",
269269+ ofnode_get_name(node));
269270 return -EINVAL;
270271 }
271272272273 len = ofnode_read_size(node, "reg");
273274 if (len < 0) {
274274- debug("%s: Error while reading reg size (ret = %d)\n",
275275- ofnode_get_name(node), len);
275275+ dm_warn("%s: Error while reading reg size (ret = %d)\n",
276276+ ofnode_get_name(node), len);
276277 return len;
277278 }
278279 len /= sizeof(fdt32_t);
279280 count = len / both_len;
280281 if (!count) {
281281- debug("%s: Not enough data in reg property\n",
282282- ofnode_get_name(node));
282282+ dm_warn("%s: Not enough data in reg property\n",
283283+ ofnode_get_name(node));
283284 return -EINVAL;
284285 }
285286···424425 void *ptr;
425426426427 if (do_range_check() && range_num >= map->range_count) {
427427- debug("%s: range index %d larger than range count\n",
428428- __func__, range_num);
428428+ dm_warn("%s: range index %d larger than range count\n",
429429+ __func__, range_num);
429430 return -ERANGE;
430431 }
431432 range = &map->ranges[range_num];
···433434 offset <<= map->reg_offset_shift;
434435 if (do_range_check() &&
435436 (offset + val_len > range->size || offset + val_len < offset)) {
436436- debug("%s: offset/size combination invalid\n", __func__);
437437+ dm_warn("%s: offset/size combination invalid\n", __func__);
437438 return -ERANGE;
438439 }
439440···455456 break;
456457#endif
457458 default:
458458- debug("%s: regmap size %zu unknown\n", __func__, val_len);
459459+ dm_warn("%s: regmap size %zu unknown\n", __func__, val_len);
459460 return -EINVAL;
460461 }
461462···564565 void *ptr;
565566566567 if (range_num >= map->range_count) {
567567- debug("%s: range index %d larger than range count\n",
568568- __func__, range_num);
568568+ dm_warn("%s: range index %d larger than range count\n",
569569+ __func__, range_num);
569570 return -ERANGE;
570571 }
571572 range = &map->ranges[range_num];
572573573574 offset <<= map->reg_offset_shift;
574575 if (offset + val_len > range->size || offset + val_len < offset) {
575575- debug("%s: offset/size combination invalid\n", __func__);
576576+ dm_warn("%s: offset/size combination invalid\n", __func__);
576577 return -ERANGE;
577578 }
578579···594595 break;
595596#endif
596597 default:
597597- debug("%s: regmap size %zu unknown\n", __func__, val_len);
598598+ dm_warn("%s: regmap size %zu unknown\n", __func__, val_len);
598599 return -EINVAL;
599600 }
600601···630631 u.v64 = val;
631632 break;
632633 default:
633633- debug("%s: regmap size %zu unknown\n", __func__,
634634- (size_t)map->width);
634634+ dm_warn("%s: regmap size %zu unknown\n", __func__,
635635+ (size_t)map->width);
635636 return -EINVAL;
636637 }
637638
+7-7
drivers/core/root.c
···207207 err = lists_bind_fdt(parent, node, NULL, NULL, pre_reloc_only);
208208 if (err && !ret) {
209209 ret = err;
210210- debug("%s: ret=%d\n", node_name, ret);
210210+ dm_warn("%s: ret=%d\n", node_name, ret);
211211 }
212212 }
213213···248248249249 ret = dm_scan_fdt(pre_reloc_only);
250250 if (ret) {
251251- debug("dm_scan_fdt() failed: %d\n", ret);
251251+ dm_warn("dm_scan_fdt() failed: %d\n", ret);
252252 return ret;
253253 }
254254···256256 for (i = 0; i < ARRAY_SIZE(nodes); i++) {
257257 ret = dm_scan_fdt_ofnode_path(nodes[i], pre_reloc_only);
258258 if (ret) {
259259- debug("dm_scan_fdt() scan for %s failed: %d\n",
260260- nodes[i], ret);
259259+ dm_warn("dm_scan_fdt() scan for %s failed: %d\n",
260260+ nodes[i], ret);
261261 return ret;
262262 }
263263 }
···320320321321 ret = dm_scan_plat(pre_reloc_only);
322322 if (ret) {
323323- debug("dm_scan_plat() failed: %d\n", ret);
323323+ dm_warn("dm_scan_plat() failed: %d\n", ret);
324324 return ret;
325325 }
326326327327 if (CONFIG_IS_ENABLED(OF_REAL)) {
328328 ret = dm_extended_scan(pre_reloc_only);
329329 if (ret) {
330330- debug("dm_extended_scan() failed: %d\n", ret);
330330+ dm_warn("dm_extended_scan() failed: %d\n", ret);
331331 return ret;
332332 }
333333 }
···345345346346 ret = dm_init(CONFIG_IS_ENABLED(OF_LIVE));
347347 if (ret) {
348348- debug("dm_init() failed: %d\n", ret);
348348+ dm_warn("dm_init() failed: %d\n", ret);
349349 return ret;
350350 }
351351 if (!CONFIG_IS_ENABLED(OF_PLATDATA_INST)) {
+2-2
drivers/core/uclass.c
···5959 *ucp = NULL;
6060 uc_drv = lists_uclass_lookup(id);
6161 if (!uc_drv) {
6262- debug("Cannot find uclass for id %d: please add the UCLASS_DRIVER() declaration for this UCLASS_... id\n",
6363- id);
6262+ dm_warn("Cannot find uclass for id %d: please add the UCLASS_DRIVER() declaration for this UCLASS_... id\n",
6363+ id);
6464 /*
6565 * Use a strange error to make this case easier to find. When
6666 * a uclass is not available it can prevent driver model from
-4
include/asm-generic/global_data.h
···196196 */
197197 struct udevice *dm_root;
198198 /**
199199- * @dm_root_f: pre-relocation root instance
200200- */
201201- struct udevice *dm_root_f;
202202- /**
203199 * @uclass_root_s:
204200 * head of core tree when uclasses are not in read-only memory.
205201 *
···711711 information about what needs to be fixed. See missing-blob-help for the
712712 message for each tag.
713713714714+assume-size:
715715+ Sets the assumed size of a blob entry if it is missing. This allows for a
716716+ check that the rest of the image fits into the available space, even when
717717+ the contents are not available. If the entry is missing, Binman will use
718718+ this assumed size for the entry size, including creating a fake file of that
719719+ size if requested.
720720+714721no-expanded:
715722 By default binman substitutes entries with expanded versions if available,
716723 so that a `u-boot` entry type turns into `u-boot-expanded`, for example. The
+86-29
tools/binman/entries.rst
···470470471471.. _etype_efi_capsule:
472472473473-Entry: capsule: Entry for generating EFI Capsule files
474474-------------------------------------------------------
473473+Entry: efi-capsule: Generate EFI capsules
474474+-----------------------------------------
475475476476-The parameters needed for generation of the capsules can be provided
477477-as properties in the entry.
476476+The parameters needed for generation of the capsules can
477477+be provided as properties in the entry.
478478479479Properties / Entry arguments:
480480 - image-index: Unique number for identifying corresponding
···495495 file. Mandatory property for generating signed capsules.
496496 - oem-flags - OEM flags to be passed through capsule header.
497497498498- Since this is a subclass of Entry_section, all properties of the parent
499499- class also apply here. Except for the properties stated as mandatory, the
500500- rest of the properties are optional.
498498+Since this is a subclass of Entry_section, all properties of the parent
499499+class also apply here. Except for the properties stated as mandatory, the
500500+rest of the properties are optional.
501501502502For more details on the description of the capsule format, and the capsule
503503update functionality, refer Section 8.5 and Chapter 23 in the `UEFI
···510510A typical capsule entry node would then look something like this::
511511512512 capsule {
513513- type = "efi-capsule";
514514- image-index = <0x1>;
515515- /* Image GUID for testing capsule update */
516516- image-guid = SANDBOX_UBOOT_IMAGE_GUID;
517517- hardware-instance = <0x0>;
518518- private-key = "path/to/the/private/key";
519519- public-key-cert = "path/to/the/public-key-cert";
520520- oem-flags = <0x8000>;
513513+ type = "efi-capsule";
514514+ image-index = <0x1>;
515515+ /* Image GUID for testing capsule update */
516516+ image-guid = SANDBOX_UBOOT_IMAGE_GUID;
517517+ hardware-instance = <0x0>;
518518+ private-key = "path/to/the/private/key";
519519+ public-key-cert = "path/to/the/public-key-cert";
520520+ oem-flags = <0x8000>;
521521522522- u-boot {
523523- };
522522+ u-boot {
523523+ };
524524 };
525525526526In the above example, the capsule payload is the U-Boot image. The
···534534535535.. _etype_efi_empty_capsule:
536536537537-Entry: efi-empty-capsule: Entry for generating EFI Empty Capsule files
538538-----------------------------------------------------------------------
537537+Entry: efi-empty-capsule: Generate EFI empty capsules
538538+-----------------------------------------------------
539539540540The parameters needed for generation of the empty capsules can
541541be provided as properties in the entry.
···551551specification`_. For more information on the empty capsule, refer the
552552sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_.
553553554554-A typical accept empty capsule entry node would then look something
555555-like this::
554554+A typical accept empty capsule entry node would then look something like
555555+this::
556556557557 empty-capsule {
558558- type = "efi-empty-capsule";
559559- /* GUID of the image being accepted */
560560- image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
561561- capsule-type = "accept";
558558+ type = "efi-empty-capsule";
559559+ /* GUID of image being accepted */
560560+ image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
561561+ capsule-type = "accept";
562562 };
563563564564-A typical revert empty capsule entry node would then look something
565565-like this::
564564+A typical revert empty capsule entry node would then look something like
565565+this::
566566567567 empty-capsule {
568568- type = "efi-empty-capsule";
569569- capsule-type = "revert";
568568+ type = "efi-empty-capsule";
569569+ capsule-type = "revert";
570570 };
571571572572The empty capsules do not have any input payload image.
···15211521152215221523152315241524+.. _etype_nxp_imx8mcst:
15251525+15261526+Entry: nxp-imx8mcst: NXP i.MX8M CST .cfg file generator and cst invoker
15271527+-----------------------------------------------------------------------
15281528+15291529+Properties / Entry arguments:
15301530+ - nxp,loader-address - loader address (SPL text base)
15311531+15321532+15331533+15341534+.. _etype_nxp_imx8mimage:
15351535+15361536+Entry: nxp-imx8mimage: NXP i.MX8M imx8mimage .cfg file generator and mkimage invoker
15371537+------------------------------------------------------------------------------------
15381538+15391539+Properties / Entry arguments:
15401540+ - nxp,boot-from - device to boot from (e.g. 'sd')
15411541+ - nxp,loader-address - loader address (SPL text base)
15421542+ - nxp,rom-version - BootROM version ('2' for i.MX8M Nano and Plus)
15431543+15441544+15451545+15241546.. _etype_opensbi:
1525154715261548Entry: opensbi: RISC-V OpenSBI fw_dynamic blob
···19291951 - content: List of phandles to entries to sign
19301952 - keyfile: Filename of file containing key to sign binary with
19311953 - sha: Hash function to be used for signing
19541954+ - auth-in-place: This is an integer field that contains two pieces
19551955+ of information:
19561956+19571957+ - Lower Byte - Remains 0x02 as per our use case
19581958+ ( 0x02: Move the authenticated binary back to the header )
19591959+ - Upper Byte - The Host ID of the core owning the firewall
1932196019331961Output files:
19341962 - input.<unique_name> - input file passed to openssl
···19361964 used as the config file)
19371965 - cert.<unique_name> - output file generated by openssl (which is
19381966 used as the entry contents)
19671967+19681968+Depending on auth-in-place information in the inputs, we read the
19691969+firewall nodes that describe the configurations of firewall that TIFS
19701970+will be doing after reading the certificate.
19711971+19721972+The syntax of the firewall nodes are as such::
19731973+19741974+ firewall-257-0 {
19751975+ id = <257>; /* The ID of the firewall being configured */
19761976+ region = <0>; /* Region number to configure */
19771977+19781978+ control = /* The control register */
19791979+ <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>;
19801980+19811981+ permissions = /* The permission registers */
19821982+ <((FWPRIVID_ALL << FWPRIVID_SHIFT) |
19831983+ FWPERM_SECURE_PRIV_RWCD |
19841984+ FWPERM_SECURE_USER_RWCD |
19851985+ FWPERM_NON_SECURE_PRIV_RWCD |
19861986+ FWPERM_NON_SECURE_USER_RWCD)>;
19871987+19881988+ /* More defines can be found in k3-security.h */
19891989+19901990+ start_address = /* The Start Address of the firewall */
19911991+ <0x0 0x0>;
19921992+ end_address = /* The End Address of the firewall */
19931993+ <0xff 0xffffffff>;
19941994+ };
19951995+1939199619401997openssl signs the provided data, using the TI templated config file and
19411998writes the signature in this entry. This allows verification that the
+2-1
tools/binman/entry.py
···315315 self.overlap = fdt_util.GetBool(self._node, 'overlap')
316316 if self.overlap:
317317 self.required_props += ['offset', 'size']
318318+ self.assume_size = fdt_util.GetInt(self._node, 'assume-size', 0)
318319319320 # This is only supported by blobs and sections at present
320321 self.compress = fdt_util.GetString(self._node, 'compress', 'none')
···812813 as missing
813814 """
814815 print('''Binman Entry Documentation
815815-===========================
816816+==========================
816817817818This file describes the entry types supported by binman. These entry types can
818819be placed in an image one by one to build up a final firmware image. It is
+3-3
tools/binman/entry_test.py
···103103 ent = entry.Entry.Create(None, self.GetNode(), 'missing',
104104 missing_etype=True)
105105 self.assertTrue(isinstance(ent, Entry_blob))
106106- self.assertEquals('missing', ent.etype)
106106+ self.assertEqual('missing', ent.etype)
107107108108 def testDecompressData(self):
109109 """Test the DecompressData() method of the base class"""
···111111 base.compress = 'lz4'
112112 bintools = {}
113113 base.comp_bintool = base.AddBintool(bintools, '_testing')
114114- self.assertEquals(tools.get_bytes(0, 1024), base.CompressData(b'abc'))
115115- self.assertEquals(tools.get_bytes(0, 1024), base.DecompressData(b'abc'))
114114+ self.assertEqual(tools.get_bytes(0, 1024), base.CompressData(b'abc'))
115115+ self.assertEqual(tools.get_bytes(0, 1024), base.DecompressData(b'abc'))
116116117117 def testLookupOffset(self):
118118 """Test the lookup_offset() method of the base class"""
+6-1
tools/binman/etype/blob.py
···4848 self.external and (self.optional or self.section.GetAllowMissing()))
4949 # Allow the file to be missing
5050 if not self._pathname:
5151+ if not fake_size and self.assume_size:
5252+ fake_size = self.assume_size
5153 self._pathname, faked = self.check_fake_fname(self._filename,
5254 fake_size)
5355 self.missing = True
5456 if not faked:
5555- self.SetContents(b'')
5757+ content_size = 0
5858+ if self.assume_size: # Ensure we get test coverage on next line
5959+ content_size = self.assume_size
6060+ self.SetContents(tools.get_bytes(0, content_size))
5661 return True
57625863 self.ReadBlobContents()
+20-20
tools/binman/etype/efi_capsule.py
···3636 be provided as properties in the entry.
37373838 Properties / Entry arguments:
3939- - image-index: Unique number for identifying corresponding
4040- payload image. Number between 1 and descriptor count, i.e.
4141- the total number of firmware images that can be updated. Mandatory
4242- property.
4343- - image-guid: Image GUID which will be used for identifying the
4444- updatable image on the board. Mandatory property.
4545- - hardware-instance: Optional number for identifying unique
4646- hardware instance of a device in the system. Default value of 0
4747- for images where value is not to be used.
4848- - fw-version: Value of image version that can be put on the capsule
4949- through the Firmware Management Protocol(FMP) header.
5050- - monotonic-count: Count used when signing an image.
5151- - private-key: Path to PEM formatted .key private key file. Mandatory
5252- property for generating signed capsules.
5353- - public-key-cert: Path to PEM formatted .crt public key certificate
5454- file. Mandatory property for generating signed capsules.
5555- - oem-flags - OEM flags to be passed through capsule header.
3939+ - image-index: Unique number for identifying corresponding
4040+ payload image. Number between 1 and descriptor count, i.e.
4141+ the total number of firmware images that can be updated. Mandatory
4242+ property.
4343+ - image-guid: Image GUID which will be used for identifying the
4444+ updatable image on the board. Mandatory property.
4545+ - hardware-instance: Optional number for identifying unique
4646+ hardware instance of a device in the system. Default value of 0
4747+ for images where value is not to be used.
4848+ - fw-version: Value of image version that can be put on the capsule
4949+ through the Firmware Management Protocol(FMP) header.
5050+ - monotonic-count: Count used when signing an image.
5151+ - private-key: Path to PEM formatted .key private key file. Mandatory
5252+ property for generating signed capsules.
5353+ - public-key-cert: Path to PEM formatted .crt public key certificate
5454+ file. Mandatory property for generating signed capsules.
5555+ - oem-flags - OEM flags to be passed through capsule header.
56565757 Since this is a subclass of Entry_section, all properties of the parent
5858 class also apply here. Except for the properties stated as mandatory, the
···6666 properties in the entry. The payload to be used in the capsule is to be
6767 provided as a subnode of the capsule entry.
68686969- A typical capsule entry node would then look something like this
6969+ A typical capsule entry node would then look something like this::
70707171- capsule {
7171+ capsule {
7272 type = "efi-capsule";
7373 image-index = <0x1>;
7474 /* Image GUID for testing capsule update */
···80808181 u-boot {
8282 };
8383- };
8383+ };
84848585 In the above example, the capsule payload is the U-Boot image. The
8686 capsule entry would read the contents of the payload and put them
+12-10
tools/binman/etype/efi_empty_capsule.py
···1919 be provided as properties in the entry.
20202121 Properties / Entry arguments:
2222- - image-guid: Image GUID which will be used for identifying the
2323- updatable image on the board. Mandatory for accept capsule.
2424- - capsule-type - String to indicate type of capsule to generate. Valid
2525- values are 'accept' and 'revert'.
2222+ - image-guid: Image GUID which will be used for identifying the
2323+ updatable image on the board. Mandatory for accept capsule.
2424+ - capsule-type - String to indicate type of capsule to generate. Valid
2525+ values are 'accept' and 'revert'.
26262727 For more details on the description of the capsule format, and the capsule
2828 update functionality, refer Section 8.5 and Chapter 23 in the `UEFI
2929 specification`_. For more information on the empty capsule, refer the
3030 sections 2.3.2 and 2.3.3 in the `Dependable Boot specification`_.
31313232- A typical accept empty capsule entry node would then look something like this
3232+ A typical accept empty capsule entry node would then look something like
3333+ this::
33343434- empty-capsule {
3535+ empty-capsule {
3536 type = "efi-empty-capsule";
3637 /* GUID of image being accepted */
3738 image-type-id = SANDBOX_UBOOT_IMAGE_GUID;
3839 capsule-type = "accept";
3939- };
4040+ };
40414141- A typical revert empty capsule entry node would then look something like this
4242+ A typical revert empty capsule entry node would then look something like
4343+ this::
42444343- empty-capsule {
4545+ empty-capsule {
4446 type = "efi-empty-capsule";
4547 capsule-type = "revert";
4646- };
4848+ };
47494850 The empty capsules do not have any input payload image.
4951
+1-1
tools/binman/etype/intel_descriptor.py
···5959 if self.missing:
6060 # Return zero offsets so that these entries get placed somewhere
6161 if self.HasSibling('intel-me'):
6262- info['intel-me'] = [0, None]
6262+ info['intel-me'] = [0x1000, None]
6363 return info
6464 offset = self.data.find(FD_SIGNATURE)
6565 if offset == -1:
+23-22
tools/binman/etype/ti_secure.py
···5353 - keyfile: Filename of file containing key to sign binary with
5454 - sha: Hash function to be used for signing
5555 - auth-in-place: This is an integer field that contains two pieces
5656- of information
5757- Lower Byte - Remains 0x02 as per our use case
5858- ( 0x02: Move the authenticated binary back to the header )
5959- Upper Byte - The Host ID of the core owning the firewall
5656+ of information:
5757+5858+ - Lower Byte - Remains 0x02 as per our use case
5959+ ( 0x02: Move the authenticated binary back to the header )
6060+ - Upper Byte - The Host ID of the core owning the firewall
60616162 Output files:
6263 - input.<unique_name> - input file passed to openssl
···6970 firewall nodes that describe the configurations of firewall that TIFS
7071 will be doing after reading the certificate.
71727272- The syntax of the firewall nodes are as such:
7373+ The syntax of the firewall nodes are as such::
73747474- firewall-257-0 {
7575- id = <257>; /* The ID of the firewall being configured */
7676- region = <0>; /* Region number to configure */
7575+ firewall-257-0 {
7676+ id = <257>; /* The ID of the firewall being configured */
7777+ region = <0>; /* Region number to configure */
77787878- control = /* The control register */
7979- <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>;
7979+ control = /* The control register */
8080+ <(FWCTRL_EN | FWCTRL_LOCK | FWCTRL_BG | FWCTRL_CACHE)>;
80818181- permissions = /* The permission registers */
8282- <((FWPRIVID_ALL << FWPRIVID_SHIFT) |
8383- FWPERM_SECURE_PRIV_RWCD |
8484- FWPERM_SECURE_USER_RWCD |
8585- FWPERM_NON_SECURE_PRIV_RWCD |
8686- FWPERM_NON_SECURE_USER_RWCD)>;
8282+ permissions = /* The permission registers */
8383+ <((FWPRIVID_ALL << FWPRIVID_SHIFT) |
8484+ FWPERM_SECURE_PRIV_RWCD |
8585+ FWPERM_SECURE_USER_RWCD |
8686+ FWPERM_NON_SECURE_PRIV_RWCD |
8787+ FWPERM_NON_SECURE_USER_RWCD)>;
87888888- /* More defines can be found in k3-security.h */
8989+ /* More defines can be found in k3-security.h */
89909090- start_address = /* The Start Address of the firewall */
9191- <0x0 0x0>;
9292- end_address = /* The End Address of the firewall */
9393- <0xff 0xffffffff>;
9494- };
9191+ start_address = /* The Start Address of the firewall */
9292+ <0x0 0x0>;
9393+ end_address = /* The End Address of the firewall */
9494+ <0xff 0xffffffff>;
9595+ };
959696979798 openssl signs the provided data, using the TI templated config file and
···2929 settings.read(config_fname)
30303131def add_file(data):
3232- settings.readfp(io.StringIO(data))
3232+ settings.read_file(io.StringIO(data))
3333+3434+def add_section(name):
3535+ settings.add_section(name)
33363437def get_items(section):
3538 """Get the items from a section of the config.
+11-12
tools/buildman/builder.py
···255255256256 def __init__(self, toolchains, base_dir, git_dir, num_threads, num_jobs,
257257 gnu_make='make', checkout=True, show_unknown=True, step=1,
258258- no_subdirs=False, full_path=False, verbose_build=False,
259259- mrproper=False, per_board_out_dir=False,
260260- config_only=False, squash_config_y=False,
261261- warnings_as_errors=False, work_in_output=False,
262262- test_thread_exceptions=False, adjust_cfg=None,
263263- allow_missing=False, no_lto=False, reproducible_builds=False,
264264- force_build=False, force_build_failures=False,
265265- force_reconfig=False, in_tree=False,
266266- force_config_on_failure=False, make_func=None):
258258+ no_subdirs=False, verbose_build=False,
259259+ mrproper=False, fallback_mrproper=False,
260260+ per_board_out_dir=False, config_only=False,
261261+ squash_config_y=False, warnings_as_errors=False,
262262+ work_in_output=False, test_thread_exceptions=False,
263263+ adjust_cfg=None, allow_missing=False, no_lto=False,
264264+ reproducible_builds=False, force_build=False,
265265+ force_build_failures=False, force_reconfig=False,
266266+ in_tree=False, force_config_on_failure=False, make_func=None):
267267 """Create a new Builder object
268268269269 Args:
···279279 step: 1 to process every commit, n to process every nth commit
280280 no_subdirs: Don't create subdirectories when building current
281281 source for a single board
282282- full_path: Return the full path in CROSS_COMPILE and don't set
283283- PATH
284282 verbose_build: Run build with V=1 and don't use 'make -s'
285283 mrproper: Always run 'make mrproper' when configuring
284284+ fallback_mrproper: Run 'make mrproper' and retry on build failure
286285 per_board_out_dir: Build in a separate persistent directory per
287286 board rather than a thread-specific directory
288287 config_only: Only configure each build, don't build it
···336335 self._step = step
337336 self._error_lines = 0
338337 self.no_subdirs = no_subdirs
339339- self.full_path = full_path
340338 self.verbose_build = verbose_build
341339 self.config_only = config_only
342340 self.squash_config_y = squash_config_y
···352350 self.force_reconfig = force_reconfig
353351 self.in_tree = in_tree
354352 self.force_config_on_failure = force_config_on_failure
353353+ self.fallback_mrproper = fallback_mrproper
355354356355 if not self.squash_config_y:
357356 self.config_filenames += EXTRA_CONFIG_FILENAMES
+24-16
tools/buildman/builderthread.py
···240240 return args, cwd, src_dir
241241242242 def _reconfigure(self, commit, brd, cwd, args, env, config_args, config_out,
243243- cmd_list):
243243+ cmd_list, mrproper):
244244 """Reconfigure the build
245245246246 Args:
···251251 env (dict): Environment strings
252252 config_args (list of str): defconfig arg for this board
253253 cmd_list (list of str): List to add the commands to, for logging
254254+ mrproper (bool): True to run mrproper first
254255255256 Returns:
256257 CommandResult object
257258 """
258258- if self.mrproper:
259259+ if mrproper:
259260 result = self.make(commit, brd, 'mrproper', cwd, 'mrproper', *args,
260261 env=env)
261262 config_out.write(result.combined)
···380381 commit = 'current'
381382 return commit
382383383383- def _config_and_build(self, commit_upto, brd, work_dir, do_config,
384384+ def _config_and_build(self, commit_upto, brd, work_dir, do_config, mrproper,
384385 config_only, adjust_cfg, commit, out_dir, out_rel_dir,
385386 result):
386387 """Do the build, configuring first if necessary
···390391 brd (Board): Board to create arguments for
391392 work_dir (str): Directory to which the source will be checked out
392393 do_config (bool): True to run a make <board>_defconfig on the source
394394+ mrproper (bool): True to run mrproper first
393395 config_only (bool): Only configure the source, do not build it
394396 adjust_cfg (list of str): See the cfgutil module and run_commit()
395397 commit (Commit): Commit only being built
···404406 the next incremental build
405407 """
406408 # Set up the environment and command line
407407- env = self.toolchain.MakeEnvironment(self.builder.full_path)
409409+ env = self.toolchain.MakeEnvironment()
408410 mkdir(out_dir)
409411410412 args, cwd, src_dir = self._build_args(brd, out_dir, out_rel_dir,
···419421 cmd_list = []
420422 if do_config or adjust_cfg:
421423 result = self._reconfigure(
422422- commit, brd, cwd, args, env, config_args, config_out, cmd_list)
424424+ commit, brd, cwd, args, env, config_args, config_out, cmd_list,
425425+ mrproper)
423426 do_config = False # No need to configure next time
424427 if adjust_cfg:
425428 cfgutil.adjust_cfg_file(cfg_file, adjust_cfg)
···445448 result.cmd_list = cmd_list
446449 return result, do_config
447450448448- def run_commit(self, commit_upto, brd, work_dir, do_config, config_only,
449449- force_build, force_build_failures, work_in_output,
450450- adjust_cfg):
451451+ def run_commit(self, commit_upto, brd, work_dir, do_config, mrproper,
452452+ config_only, force_build, force_build_failures,
453453+ work_in_output, adjust_cfg):
451454 """Build a particular commit.
452455453456 If the build is already done, and we are not forcing a build, we skip
···458461 brd (Board): Board to build
459462 work_dir (str): Directory to which the source will be checked out
460463 do_config (bool): True to run a make <board>_defconfig on the source
464464+ mrproper (bool): True to run mrproper first
461465 config_only (bool): Only configure the source, do not build it
462466 force_build (bool): Force a build even if one was previously done
463467 force_build_failures (bool): Force a build if the previous result
···498502 if self.toolchain:
499503 commit = self._checkout(commit_upto, work_dir)
500504 result, do_config = self._config_and_build(
501501- commit_upto, brd, work_dir, do_config, config_only,
502502- adjust_cfg, commit, out_dir, out_rel_dir, result)
505505+ commit_upto, brd, work_dir, do_config, mrproper,
506506+ config_only, adjust_cfg, commit, out_dir, out_rel_dir,
507507+ result)
503508 result.already_done = False
504509505510 result.toolchain = self.toolchain
···569574 outf.write(f'{result.return_code}')
570575571576 # Write out the image and function size information and an objdump
572572- env = result.toolchain.MakeEnvironment(self.builder.full_path)
577577+ env = result.toolchain.MakeEnvironment()
573578 with open(os.path.join(build_dir, 'out-env'), 'wb') as outf:
574579 for var in sorted(env.keys()):
575580 outf.write(b'%s="%s"' % (var, env[var]))
···688693 force_build = False
689694 for commit_upto in range(0, len(job.commits), job.step):
690695 result, request_config = self.run_commit(commit_upto, brd,
691691- work_dir, do_config, self.builder.config_only,
696696+ work_dir, do_config, self.mrproper,
697697+ self.builder.config_only,
692698 force_build or self.builder.force_build,
693699 self.builder.force_build_failures,
694700 job.work_in_output, job.adjust_cfg)
695701 failed = result.return_code or result.stderr
696702 did_config = do_config
697697- if failed and not do_config:
703703+ if failed and not do_config and not self.mrproper:
698704 # If our incremental build failed, try building again
699705 # with a reconfig.
700706 if self.builder.force_config_on_failure:
701707 result, request_config = self.run_commit(commit_upto,
702702- brd, work_dir, True, False, True, False,
703703- job.work_in_output, job.adjust_cfg)
708708+ brd, work_dir, True,
709709+ self.mrproper or self.builder.fallback_mrproper,
710710+ False, True, False, job.work_in_output,
711711+ job.adjust_cfg)
704712 did_config = True
705713 if not self.builder.force_reconfig:
706714 do_config = request_config
···744752 else:
745753 # Just build the currently checked-out build
746754 result, request_config = self.run_commit(None, brd, work_dir, True,
747747- self.builder.config_only, True,
755755+ self.mrproper, self.builder.config_only, True,
748756 self.builder.force_build_failures, job.work_in_output,
749757 job.adjust_cfg)
750758 result.commit_upto = 0
+7-1
tools/buildman/buildman.rst
···995995first commit for each board. This reduces the amount of work 'make' does, and
996996hence speeds up the build. To force use of 'make mrproper', use the -m flag.
997997This flag will slow down any buildman invocation, since it increases the amount
998998-of work done on any build.
998998+of work done on any build. An alternative is to use the --fallback-mrproper
999999+flag, which retries the build with 'make mrproper' only after a build failure.
999100010001001One possible application of buildman is as part of a continual edit, build,
10011002edit, build, ... cycle; repeatedly applying buildman to the same change or
···12841285then buildman hangs. Failing to handle any eventuality is a bug in buildman and
12851286should be reported. But you can use -T0 to disable threading and hopefully
12861287figure out the root cause of the build failure.
12881288+12891289+For situations where buildman is invoked from multiple running processes, it is
12901290+sometimes useful to have buildman wait until the others have finished. Use the
12911291+--process-limit option for this: --process-limit 1 will allow only one buildman
12921292+to process jobs at a time.
1287129312881294Build summary
12891295-------------
+5-3
tools/buildman/cmdline.py
···9090 parser.add_argument('--list-tool-chains', action='store_true', default=False,
9191 help='List available tool chains (use -v to see probing detail)')
9292 parser.add_argument('-m', '--mrproper', action='store_true',
9393- default=False, help="Run 'make mrproper before reconfiguring")
9393+ default=False, help="Run 'make mrproper' before reconfiguring")
9494+ parser.add_argument('--fallback-mrproper', action='store_true',
9595+ default=False, help="Run 'make mrproper' and retry on build failure")
9496 parser.add_argument(
9597 '-M', '--allow-missing', action='store_true', default=False,
9698 help='Tell binman to allow missing blobs and generate fake ones as needed')
121123 help="Override host toolchain to use for sandbox (e.g. 'clang-7')")
122124 parser.add_argument('-Q', '--quick', action='store_true',
123125 default=False, help='Do a rough build, with limited warning resolution')
124124- parser.add_argument('-p', '--full-path', action='store_true',
125125- default=False, help="Use full toolchain path in CROSS_COMPILE")
126126 parser.add_argument('-P', '--per-board-out-dir', action='store_true',
127127 default=False, help="Use an O= (output) directory per board rather than per thread")
128128 parser.add_argument('--print-arch', action='store_true',
129129 default=False, help="Print the architecture for a board (ARCH=)")
130130+ parser.add_argument('--process-limit', type=int,
131131+ default=0, help='Limit to number of buildmans running at once')
130132 parser.add_argument('-r', '--reproducible-builds', action='store_true',
131133 help='Set SOURCE_DATE_EPOCH=0 to suuport a reproducible build')
132134 parser.add_argument('-R', '--regen-board-list', type=str,
+141-4
tools/buildman/control.py
···77This holds the main control logic for buildman, when not running tests.
88"""
991010+import getpass
1011import multiprocessing
1112import os
1213import shutil
1314import sys
1515+import tempfile
1616+import time
14171518from buildman import boards
1619from buildman import bsettings
···2124from patman import patchstream
2225from u_boot_pylib import command
2326from u_boot_pylib import terminal
2424-from u_boot_pylib.terminal import tprint
2727+from u_boot_pylib import tools
2828+from u_boot_pylib.terminal import print_clear, tprint
25292630TEST_BUILDER = None
3131+3232+# Space-separated list of buildman process IDs currently running jobs
3333+RUNNING_FNAME = f'buildmanq.{getpass.getuser()}'
3434+3535+# Lock file for access to RUNNING_FILE
3636+LOCK_FNAME = f'{RUNNING_FNAME}.lock'
3737+3838+# Wait time for access to lock (seconds)
3939+LOCK_WAIT_S = 10
4040+4141+# Wait time to start running
4242+RUN_WAIT_S = 300
27432844def get_plural(count):
2945 """Returns a plural 's' if count is not 1"""
···578594 return adjust_cfg
579595580596597597+def read_procs(tmpdir=tempfile.gettempdir()):
598598+ """Read the list of running buildman processes
599599+600600+ If the list is corrupted, returns an empty list
601601+602602+ Args:
603603+ tmpdir (str): Temporary directory to use (for testing only)
604604+ """
605605+ running_fname = os.path.join(tmpdir, RUNNING_FNAME)
606606+ procs = []
607607+ if os.path.exists(running_fname):
608608+ items = tools.read_file(running_fname, binary=False).split()
609609+ try:
610610+ procs = [int(x) for x in items]
611611+ except ValueError: # Handle invalid format
612612+ pass
613613+ return procs
614614+615615+616616+def check_pid(pid):
617617+ """Check for existence of a unix PID
618618+619619+ https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
620620+621621+ Args:
622622+ pid (int): PID to check
623623+624624+ Returns:
625625+ True if it exists, else False
626626+ """
627627+ try:
628628+ os.kill(pid, 0)
629629+ except OSError:
630630+ return False
631631+ else:
632632+ return True
633633+634634+635635+def write_procs(procs, tmpdir=tempfile.gettempdir()):
636636+ """Write the list of running buildman processes
637637+638638+ Args:
639639+ tmpdir (str): Temporary directory to use (for testing only)
640640+ """
641641+ running_fname = os.path.join(tmpdir, RUNNING_FNAME)
642642+ tools.write_file(running_fname, ' '.join([str(p) for p in procs]),
643643+ binary=False)
644644+645645+ # Allow another user to access the file
646646+ os.chmod(running_fname, 0o666)
647647+648648+def wait_for_process_limit(limit, tmpdir=tempfile.gettempdir(),
649649+ pid=os.getpid()):
650650+ """Wait until the number of buildman processes drops to the limit
651651+652652+ This uses FileLock to protect a 'running' file, which contains a list of
653653+ PIDs of running buildman processes. The number of PIDs in the file indicates
654654+ the number of running processes.
655655+656656+ When buildman starts up, it calls this function to wait until it is OK to
657657+ start the build.
658658+659659+ On exit, no attempt is made to remove the PID from the file, since other
660660+ buildman processes will notice that the PID is no-longer valid, and ignore
661661+ it.
662662+663663+ Two timeouts are provided:
664664+ LOCK_WAIT_S: length of time to wait for the lock; if this occurs, the
665665+ lock is busted / removed before trying again
666666+ RUN_WAIT_S: length of time to wait to be allowed to run; if this occurs,
667667+ the build starts, with the PID being added to the file.
668668+669669+ Args:
670670+ limit (int): Maximum number of buildman processes, including this one;
671671+ must be > 0
672672+ tmpdir (str): Temporary directory to use (for testing only)
673673+ pid (int): Current process ID (for testing only)
674674+ """
675675+ from filelock import Timeout, FileLock
676676+677677+ running_fname = os.path.join(tmpdir, RUNNING_FNAME)
678678+ lock_fname = os.path.join(tmpdir, LOCK_FNAME)
679679+ lock = FileLock(lock_fname)
680680+681681+ # Allow another user to access the file
682682+ col = terminal.Color()
683683+ tprint('Waiting for other buildman processes...', newline=False,
684684+ colour=col.RED)
685685+686686+ claimed = False
687687+ deadline = time.time() + RUN_WAIT_S
688688+ while True:
689689+ try:
690690+ with lock.acquire(timeout=LOCK_WAIT_S):
691691+ os.chmod(lock_fname, 0o666)
692692+ procs = read_procs(tmpdir)
693693+694694+ # Drop PIDs which are not running
695695+ procs = list(filter(check_pid, procs))
696696+697697+ # If we haven't hit the limit, add ourself
698698+ if len(procs) < limit:
699699+ tprint('done...', newline=False)
700700+ claimed = True
701701+ if time.time() >= deadline:
702702+ tprint('timeout...', newline=False)
703703+ claimed = True
704704+ if claimed:
705705+ write_procs(procs + [pid], tmpdir)
706706+ break
707707+708708+ except Timeout:
709709+ tprint('failed to get lock: busting...', newline=False)
710710+ os.remove(lock_fname)
711711+712712+ time.sleep(1)
713713+ tprint('starting build', newline=False)
714714+ print_clear()
715715+581716def do_buildman(args, toolchains=None, make_func=None, brds=None,
582717 clean_dir=False, test_thread_exceptions=False):
583718 """The main control code for buildman
···653788 builder = Builder(toolchains, output_dir, git_dir,
654789 args.threads, args.jobs, checkout=True,
655790 show_unknown=args.show_unknown, step=args.step,
656656- no_subdirs=args.no_subdirs, full_path=args.full_path,
657657- verbose_build=args.verbose_build,
658658- mrproper=args.mrproper,
791791+ no_subdirs=args.no_subdirs, verbose_build=args.verbose_build,
792792+ mrproper=args.mrproper, fallback_mrproper=args.fallback_mrproper,
659793 per_board_out_dir=args.per_board_out_dir,
660794 config_only=args.config_only,
661795 squash_config_y=not args.preserve_config_y,
···675809 force_config_on_failure=not args.quick, make_func=make_func)
676810677811 TEST_BUILDER = builder
812812+813813+ if args.process_limit:
814814+ wait_for_process_limit(args.process_limit)
678815679816 return run_builder(builder, series.commits if series else None,
680817 brds.get_selected_dict(), args)
+36-36
tools/buildman/func_test.py
···807807 params, warnings = self._boards.scan_defconfigs(src, src)
808808809809 # We should get two boards
810810- self.assertEquals(2, len(params))
810810+ self.assertEqual(2, len(params))
811811 self.assertFalse(warnings)
812812 first = 0 if params[0]['target'] == 'board0' else 1
813813 board0 = params[first]
814814 board2 = params[1 - first]
815815816816- self.assertEquals('arm', board0['arch'])
817817- self.assertEquals('armv7', board0['cpu'])
818818- self.assertEquals('-', board0['soc'])
819819- self.assertEquals('Tester', board0['vendor'])
820820- self.assertEquals('ARM Board 0', board0['board'])
821821- self.assertEquals('config0', board0['config'])
822822- self.assertEquals('board0', board0['target'])
816816+ self.assertEqual('arm', board0['arch'])
817817+ self.assertEqual('armv7', board0['cpu'])
818818+ self.assertEqual('-', board0['soc'])
819819+ self.assertEqual('Tester', board0['vendor'])
820820+ self.assertEqual('ARM Board 0', board0['board'])
821821+ self.assertEqual('config0', board0['config'])
822822+ self.assertEqual('board0', board0['target'])
823823824824- self.assertEquals('powerpc', board2['arch'])
825825- self.assertEquals('ppc', board2['cpu'])
826826- self.assertEquals('mpc85xx', board2['soc'])
827827- self.assertEquals('Tester', board2['vendor'])
828828- self.assertEquals('PowerPC board 1', board2['board'])
829829- self.assertEquals('config2', board2['config'])
830830- self.assertEquals('board2', board2['target'])
824824+ self.assertEqual('powerpc', board2['arch'])
825825+ self.assertEqual('ppc', board2['cpu'])
826826+ self.assertEqual('mpc85xx', board2['soc'])
827827+ self.assertEqual('Tester', board2['vendor'])
828828+ self.assertEqual('PowerPC board 1', board2['board'])
829829+ self.assertEqual('config2', board2['config'])
830830+ self.assertEqual('board2', board2['target'])
831831832832 def test_output_is_new(self):
833833 """Test detecting new changes to Kconfig"""
···898898 params_list, warnings = self._boards.build_board_list(config_dir, src)
899899900900 # There should be two boards no warnings
901901- self.assertEquals(2, len(params_list))
901901+ self.assertEqual(2, len(params_list))
902902 self.assertFalse(warnings)
903903904904 # Set an invalid status line in the file
···907907 for line in orig_data.splitlines(keepends=True)]
908908 tools.write_file(main, ''.join(lines), binary=False)
909909 params_list, warnings = self._boards.build_board_list(config_dir, src)
910910- self.assertEquals(2, len(params_list))
910910+ self.assertEqual(2, len(params_list))
911911 params = params_list[0]
912912 if params['target'] == 'board2':
913913 params = params_list[1]
914914- self.assertEquals('-', params['status'])
915915- self.assertEquals(["WARNING: Other: unknown status for 'board0'"],
914914+ self.assertEqual('-', params['status'])
915915+ self.assertEqual(["WARNING: Other: unknown status for 'board0'"],
916916 warnings)
917917918918 # Remove the status line (S:) from a file
···920920 if not line.startswith('S:')]
921921 tools.write_file(main, ''.join(lines), binary=False)
922922 params_list, warnings = self._boards.build_board_list(config_dir, src)
923923- self.assertEquals(2, len(params_list))
924924- self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings)
923923+ self.assertEqual(2, len(params_list))
924924+ self.assertEqual(["WARNING: -: unknown status for 'board0'"], warnings)
925925926926 # Remove the configs/ line (F:) from a file - this is the last line
927927 data = ''.join(orig_data.splitlines(keepends=True)[:-1])
928928 tools.write_file(main, data, binary=False)
929929 params_list, warnings = self._boards.build_board_list(config_dir, src)
930930- self.assertEquals(2, len(params_list))
931931- self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings)
930930+ self.assertEqual(2, len(params_list))
931931+ self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings)
932932933933 # Mark a board as orphaned - this should give a warning
934934 lines = ['S: Orphaned' if line.startswith('S') else line
935935 for line in orig_data.splitlines(keepends=True)]
936936 tools.write_file(main, ''.join(lines), binary=False)
937937 params_list, warnings = self._boards.build_board_list(config_dir, src)
938938- self.assertEquals(2, len(params_list))
939939- self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings)
938938+ self.assertEqual(2, len(params_list))
939939+ self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings)
940940941941 # Change the maintainer to '-' - this should give a warning
942942 lines = ['M: -' if line.startswith('M') else line
943943 for line in orig_data.splitlines(keepends=True)]
944944 tools.write_file(main, ''.join(lines), binary=False)
945945 params_list, warnings = self._boards.build_board_list(config_dir, src)
946946- self.assertEquals(2, len(params_list))
947947- self.assertEquals(["WARNING: -: unknown status for 'board0'"], warnings)
946946+ self.assertEqual(2, len(params_list))
947947+ self.assertEqual(["WARNING: -: unknown status for 'board0'"], warnings)
948948949949 # Remove the maintainer line (M:) from a file
950950 lines = [line for line in orig_data.splitlines(keepends=True)
951951 if not line.startswith('M:')]
952952 tools.write_file(main, ''.join(lines), binary=False)
953953 params_list, warnings = self._boards.build_board_list(config_dir, src)
954954- self.assertEquals(2, len(params_list))
955955- self.assertEquals(["WARNING: no maintainers for 'board0'"], warnings)
954954+ self.assertEqual(2, len(params_list))
955955+ self.assertEqual(["WARNING: no maintainers for 'board0'"], warnings)
956956957957 # Move the contents of the second file into this one, removing the
958958 # second file, to check multiple records in a single file.
···960960 tools.write_file(main, both_data, binary=False)
961961 os.remove(other)
962962 params_list, warnings = self._boards.build_board_list(config_dir, src)
963963- self.assertEquals(2, len(params_list))
963963+ self.assertEqual(2, len(params_list))
964964 self.assertFalse(warnings)
965965966966 # Add another record, this should be ignored with a warning
967967 extra = '\n\nAnother\nM: Fred\nF: configs/board9_defconfig\nS: other\n'
968968 tools.write_file(main, both_data + extra, binary=False)
969969 params_list, warnings = self._boards.build_board_list(config_dir, src)
970970- self.assertEquals(2, len(params_list))
970970+ self.assertEqual(2, len(params_list))
971971 self.assertFalse(warnings)
972972973973 # Add another TARGET to the Kconfig
···983983 tools.write_file(kc_file, orig_kc_data + extra)
984984 params_list, warnings = self._boards.build_board_list(config_dir, src,
985985 warn_targets=True)
986986- self.assertEquals(2, len(params_list))
987987- self.assertEquals(
986986+ self.assertEqual(2, len(params_list))
987987+ self.assertEqual(
988988 ['WARNING: board2_defconfig: Duplicate TARGET_xxx: board2 and other'],
989989 warnings)
990990···994994 tools.write_file(kc_file, b''.join(lines))
995995 params_list, warnings = self._boards.build_board_list(config_dir, src,
996996 warn_targets=True)
997997- self.assertEquals(2, len(params_list))
998998- self.assertEquals(
997997+ self.assertEqual(2, len(params_list))
998998+ self.assertEqual(
999999 ['WARNING: board2_defconfig: No TARGET_BOARD2 enabled'],
10001000 warnings)
10011001 tools.write_file(kc_file, orig_kc_data)
···10041004 data = ''.join(both_data.splitlines(keepends=True)[:-1])
10051005 tools.write_file(main, data + 'N: oa.*2\n', binary=False)
10061006 params_list, warnings = self._boards.build_board_list(config_dir, src)
10071007- self.assertEquals(2, len(params_list))
10071007+ self.assertEqual(2, len(params_list))
10081008 self.assertFalse(warnings)
1009100910101010 def testRegenBoards(self):
···22# Copyright (c) 2012 The Chromium OS Authors.
33#
4455+from filelock import FileLock
56import os
67import shutil
78import sys
89import tempfile
910import time
1011import unittest
1212+from unittest.mock import patch
11131214from buildman import board
1315from buildman import boards
···146148 self.toolchains.Add('arm-linux-gcc', test=False)
147149 self.toolchains.Add('sparc-linux-gcc', test=False)
148150 self.toolchains.Add('powerpc-linux-gcc', test=False)
151151+ self.toolchains.Add('/path/to/aarch64-linux-gcc', test=False)
149152 self.toolchains.Add('gcc', test=False)
150153151154 # Avoid sending any output
···155158 self.base_dir = tempfile.mkdtemp()
156159 if not os.path.isdir(self.base_dir):
157160 os.mkdir(self.base_dir)
161161+162162+ self.cur_time = 0
163163+ self.valid_pids = []
164164+ self.finish_time = None
165165+ self.finish_pid = None
158166159167 def tearDown(self):
160168 shutil.rmtree(self.base_dir)
···584592 if use_network:
585593 with test_util.capture_sys_output() as (stdout, stderr):
586594 url = self.toolchains.LocateArchUrl('arm')
587587- self.assertRegexpMatches(url, 'https://www.kernel.org/pub/tools/'
595595+ self.assertRegex(url, 'https://www.kernel.org/pub/tools/'
588596 'crosstool/files/bin/x86_64/.*/'
589597 'x86_64-gcc-.*-nolibc[-_]arm-.*linux-gnueabi.tar.xz')
590598···747755 self.assertEqual([
748756 ['MARY="mary"', 'Missing expected line: CONFIG_MARY="mary"']], result)
749757758758+ def get_procs(self):
759759+ running_fname = os.path.join(self.base_dir, control.RUNNING_FNAME)
760760+ items = tools.read_file(running_fname, binary=False).split()
761761+ return [int(x) for x in items]
762762+763763+ def get_time(self):
764764+ return self.cur_time
765765+766766+ def inc_time(self, amount):
767767+ self.cur_time += amount
768768+769769+ # Handle a process exiting
770770+ if self.finish_time == self.cur_time:
771771+ self.valid_pids = [pid for pid in self.valid_pids
772772+ if pid != self.finish_pid]
773773+774774+ def kill(self, pid, signal):
775775+ if pid not in self.valid_pids:
776776+ raise OSError('Invalid PID')
777777+778778+ def test_process_limit(self):
779779+ """Test wait_for_process_limit() function"""
780780+ tmpdir = self.base_dir
781781+782782+ with (patch('time.time', side_effect=self.get_time),
783783+ patch('time.sleep', side_effect=self.inc_time),
784784+ patch('os.kill', side_effect=self.kill)):
785785+ # Grab the process. Since there is no other process, this should
786786+ # immediately succeed
787787+ control.wait_for_process_limit(1, tmpdir=tmpdir, pid=1)
788788+ lines = terminal.get_print_test_lines()
789789+ self.assertEqual(0, self.cur_time)
790790+ self.assertEqual('Waiting for other buildman processes...',
791791+ lines[0].text)
792792+ self.assertEqual(self._col.RED, lines[0].colour)
793793+ self.assertEqual(False, lines[0].newline)
794794+ self.assertEqual(True, lines[0].bright)
795795+796796+ self.assertEqual('done...', lines[1].text)
797797+ self.assertEqual(None, lines[1].colour)
798798+ self.assertEqual(False, lines[1].newline)
799799+ self.assertEqual(True, lines[1].bright)
800800+801801+ self.assertEqual('starting build', lines[2].text)
802802+ self.assertEqual([1], control.read_procs(tmpdir))
803803+ self.assertEqual(None, lines[2].colour)
804804+ self.assertEqual(False, lines[2].newline)
805805+ self.assertEqual(True, lines[2].bright)
806806+807807+ # Try again, with a different PID...this should eventually timeout
808808+ # and start the build anyway
809809+ self.cur_time = 0
810810+ self.valid_pids = [1]
811811+ control.wait_for_process_limit(1, tmpdir=tmpdir, pid=2)
812812+ lines = terminal.get_print_test_lines()
813813+ self.assertEqual('Waiting for other buildman processes...',
814814+ lines[0].text)
815815+ self.assertEqual('timeout...', lines[1].text)
816816+ self.assertEqual(None, lines[1].colour)
817817+ self.assertEqual(False, lines[1].newline)
818818+ self.assertEqual(True, lines[1].bright)
819819+ self.assertEqual('starting build', lines[2].text)
820820+ self.assertEqual([1, 2], control.read_procs(tmpdir))
821821+ self.assertEqual(control.RUN_WAIT_S, self.cur_time)
822822+823823+ # Check lock-busting
824824+ self.cur_time = 0
825825+ self.valid_pids = [1, 2]
826826+ lock_fname = os.path.join(tmpdir, control.LOCK_FNAME)
827827+ lock = FileLock(lock_fname)
828828+ lock.acquire(timeout=1)
829829+ control.wait_for_process_limit(1, tmpdir=tmpdir, pid=3)
830830+ lines = terminal.get_print_test_lines()
831831+ self.assertEqual('Waiting for other buildman processes...',
832832+ lines[0].text)
833833+ self.assertEqual('failed to get lock: busting...', lines[1].text)
834834+ self.assertEqual(None, lines[1].colour)
835835+ self.assertEqual(False, lines[1].newline)
836836+ self.assertEqual(True, lines[1].bright)
837837+ self.assertEqual('timeout...', lines[2].text)
838838+ self.assertEqual('starting build', lines[3].text)
839839+ self.assertEqual([1, 2, 3], control.read_procs(tmpdir))
840840+ self.assertEqual(control.RUN_WAIT_S, self.cur_time)
841841+ lock.release()
842842+843843+ # Check handling of dead processes. Here we have PID 2 as a running
844844+ # process, even though the PID file contains 1, 2 and 3. So we can
845845+ # add one more PID, to make 2 and 4
846846+ self.cur_time = 0
847847+ self.valid_pids = [2]
848848+ control.wait_for_process_limit(2, tmpdir=tmpdir, pid=4)
849849+ lines = terminal.get_print_test_lines()
850850+ self.assertEqual('Waiting for other buildman processes...',
851851+ lines[0].text)
852852+ self.assertEqual('done...', lines[1].text)
853853+ self.assertEqual('starting build', lines[2].text)
854854+ self.assertEqual([2, 4], control.read_procs(tmpdir))
855855+ self.assertEqual(0, self.cur_time)
856856+857857+ # Try again, with PID 2 quitting at time 50. This allows the new
858858+ # build to start
859859+ self.cur_time = 0
860860+ self.valid_pids = [2, 4]
861861+ self.finish_pid = 2
862862+ self.finish_time = 50
863863+ control.wait_for_process_limit(2, tmpdir=tmpdir, pid=5)
864864+ lines = terminal.get_print_test_lines()
865865+ self.assertEqual('Waiting for other buildman processes...',
866866+ lines[0].text)
867867+ self.assertEqual('done...', lines[1].text)
868868+ self.assertEqual('starting build', lines[2].text)
869869+ self.assertEqual([4, 5], control.read_procs(tmpdir))
870870+ self.assertEqual(self.finish_time, self.cur_time)
871871+872872+ def call_make_environment(self, tchn, in_env=None):
873873+ """Call Toolchain.MakeEnvironment() and process the result
874874+875875+ Args:
876876+ tchn (Toolchain): Toolchain to use
877877+ in_env (dict): Input environment to use, None to use current env
878878+879879+ Returns:
880880+ tuple:
881881+ dict: Changes that MakeEnvironment has made to the environment
882882+ key: Environment variable that was changed
883883+ value: New value (for PATH this only includes components
884884+ which were added)
885885+ str: Full value of the new PATH variable
886886+ """
887887+ env = tchn.MakeEnvironment(env=in_env)
888888+889889+ # Get the original environment
890890+ orig_env = dict(os.environb if in_env is None else in_env)
891891+ orig_path = orig_env[b'PATH'].split(b':')
892892+893893+ # Find new variables
894894+ diff = dict((k, env[k]) for k in env if orig_env.get(k) != env[k])
895895+896896+ # Find new / different path components
897897+ diff_path = None
898898+ new_path = None
899899+ if b'PATH' in diff:
900900+ new_path = diff[b'PATH'].split(b':')
901901+ diff_paths = [p for p in new_path if p not in orig_path]
902902+ diff_path = b':'.join(p for p in new_path if p not in orig_path)
903903+ if diff_path:
904904+ diff[b'PATH'] = diff_path
905905+ else:
906906+ del diff[b'PATH']
907907+ return diff, new_path
908908+909909+ def test_toolchain_env(self):
910910+ """Test PATH and other environment settings for toolchains"""
911911+ # Use a toolchain which has a path
912912+ tchn = self.toolchains.Select('aarch64')
913913+914914+ # Normal case
915915+ diff = self.call_make_environment(tchn)[0]
916916+ self.assertEqual(
917917+ {b'CROSS_COMPILE': b'/path/to/aarch64-linux-', b'LC_ALL': b'C'},
918918+ diff)
919919+920920+ # When overriding the toolchain, only LC_ALL should be set
921921+ tchn.override_toolchain = True
922922+ diff = self.call_make_environment(tchn)[0]
923923+ self.assertEqual({b'LC_ALL': b'C'}, diff)
924924+925925+ # Test that virtualenv is handled correctly
926926+ tchn.override_toolchain = False
927927+ sys.prefix = '/some/venv'
928928+ env = dict(os.environb)
929929+ env[b'PATH'] = b'/some/venv/bin:other/things'
930930+ tchn.path = '/my/path'
931931+ diff, diff_path = self.call_make_environment(tchn, env)
932932+933933+ self.assertNotIn(b'PATH', diff)
934934+ self.assertEqual(None, diff_path)
935935+ self.assertEqual(
936936+ {b'CROSS_COMPILE': b'/my/path/aarch64-linux-', b'LC_ALL': b'C'},
937937+ diff)
938938+939939+ # Handle a toolchain wrapper
940940+ tchn.path = ''
941941+ bsettings.add_section('toolchain-wrapper')
942942+ bsettings.set_item('toolchain-wrapper', 'my-wrapper', 'fred')
943943+ diff = self.call_make_environment(tchn)[0]
944944+ self.assertEqual(
945945+ {b'CROSS_COMPILE': b'fred aarch64-linux-', b'LC_ALL': b'C'}, diff)
750946751947if __name__ == "__main__":
752948 unittest.main()
+11-13
tools/buildman/toolchain.py
···9090 if self.arch == 'sandbox' and override_toolchain:
9191 self.gcc = override_toolchain
92929393- env = self.MakeEnvironment(False)
9393+ env = self.MakeEnvironment()
94949595 # As a basic sanity check, run the C compiler with --version
9696 cmd = [fname, '--version']
···172172 else:
173173 raise ValueError('Unknown arg to GetEnvArgs (%d)' % which)
174174175175- def MakeEnvironment(self, full_path):
175175+ def MakeEnvironment(self, env=None):
176176 """Returns an environment for using the toolchain.
177177178178- Thie takes the current environment and adds CROSS_COMPILE so that
178178+ This takes the current environment and adds CROSS_COMPILE so that
179179 the tool chain will operate correctly. This also disables localized
180180- output and possibly unicode encoded output of all build tools by
180180+ output and possibly Unicode encoded output of all build tools by
181181 adding LC_ALL=C.
182182183183 Note that os.environb is used to obtain the environment, since in some
···188188 569-570: surrogates not allowed
189189190190 Args:
191191- full_path: Return the full path in CROSS_COMPILE and don't set
192192- PATH
191191+ env (dict of bytes): Original environment, used for testing
192192+193193 Returns:
194194 Dict containing the (bytes) environment to use. This is based on the
195195- current environment, with changes as needed to CROSS_COMPILE, PATH
196196- and LC_ALL.
195195+ current environment, with changes as needed to CROSS_COMPILE and
196196+ LC_ALL.
197197 """
198198- env = dict(os.environb)
198198+ env = dict(env or os.environb)
199199+199200 wrapper = self.GetWrapper()
200201201202 if self.override_toolchain:
202203 # We'll use MakeArgs() to provide this
203204 pass
204204- elif full_path:
205205+ else:
205206 env[b'CROSS_COMPILE'] = tools.to_bytes(
206207 wrapper + os.path.join(self.path, self.cross))
207207- else:
208208- env[b'CROSS_COMPILE'] = tools.to_bytes(wrapper + self.cross)
209209- env[b'PATH'] = tools.to_bytes(self.path) + b':' + env[b'PATH']
210208211209 env[b'LC_ALL'] = b'C'
212210
+8-2
tools/patman/func_test.py
···211211 'u-boot': ['u-boot@lists.denx.de'],
212212 'simon': [self.leb],
213213 'fred': [self.fred],
214214+ 'joe': [self.joe],
214215 }
215216216217 text = self._get_text('test01.txt')
···259260 self.assertEqual('Postfix:\t some-branch', next(lines))
260261 self.assertEqual('Cover: 4 lines', next(lines))
261262 self.assertEqual(' Cc: %s' % self.fred, next(lines))
263263+ self.assertEqual(' Cc: %s' % self.joe, next(lines))
262264 self.assertEqual(' Cc: %s' % self.leb,
263265 next(lines))
264266 self.assertEqual(' Cc: %s' % mel, next(lines))
···272274273275 self.assertEqual(('%s %s\0%s' % (args[0], rick, stefan)), cc_lines[0])
274276 self.assertEqual(
275275- '%s %s\0%s\0%s\0%s' % (args[1], self.fred, self.leb, rick, stefan),
277277+ '%s %s\0%s\0%s\0%s\0%s' % (args[1], self.fred, self.joe, self.leb,
278278+ rick, stefan),
276279 cc_lines[1])
277280278281 expected = '''
···290293 change
291294- Some changes
292295- Some notes for the cover letter
296296+- fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base()
293297294298Simon Glass (2):
295299 pci: Correct cast for sandbox
···339343- Multi
340344 line
341345 change
346346+- New
342347- Some changes
343348344349Changes in v2:
···540545 with open('.patman', 'w', buffering=1) as f:
541546 f.write('[settings]\n'
542547 'get_maintainer_script: dummy-script.sh\n'
543543- 'check_patch: False\n')
548548+ 'check_patch: False\n'
549549+ 'add_maintainers: True\n')
544550 with open('dummy-script.sh', 'w', buffering=1) as f:
545551 f.write('#!/usr/bin/env python\n'
546552 'print("hello@there.com")\n')
···350350 - This line will only appear in the cover letter
351351 <blank line>
352352353353-Patch-cc: Their Name <email>
353353+Commit-added-in: n
354354+ Add a change noting the version this commit was added in. This is
355355+ equivalent to::
356356+357357+ Commit-changes: n
358358+ - New
359359+360360+ Cover-changes: n
361361+ - <commit subject>
362362+363363+ It is a convenient shorthand for suppressing the '(no changes in vN)'
364364+ message.
365365+366366+Patch-cc / Commit-cc: Their Name <email>
354367 This copies a single patch to another email address. Note that the
355368 Cc: used by git send-email is ignored by patman, but will be
356369 interpreted by git send-email if you use it.
+4-4
tools/patman/settings.py
···59596060 # Check to make sure that bogus project gets general alias.
6161 >>> config = _ProjectConfigParser("zzz")
6262- >>> config.readfp(StringIO(sample_config))
6262+ >>> config.read_file(StringIO(sample_config))
6363 >>> str(config.get("alias", "enemies"))
6464 'Evil <evil@example.com>'
65656666 # Check to make sure that alias gets overridden by project.
6767 >>> config = _ProjectConfigParser("sm")
6868- >>> config.readfp(StringIO(sample_config))
6868+ >>> config.read_file(StringIO(sample_config))
6969 >>> str(config.get("alias", "enemies"))
7070 'Green G. <ugly@example.com>'
71717272 # Check to make sure that settings get merged with project.
7373 >>> config = _ProjectConfigParser("linux")
7474- >>> config.readfp(StringIO(sample_config))
7474+ >>> config.read_file(StringIO(sample_config))
7575 >>> sorted((str(a), str(b)) for (a, b) in config.items("settings"))
7676 [('am_hero', 'True'), ('check_patch_use_tree', 'True'), ('process_tags', 'False')]
77777878 # Check to make sure that settings works with unknown project.
7979 >>> config = _ProjectConfigParser("unknown")
8080- >>> config.readfp(StringIO(sample_config))
8080+ >>> config.read_file(StringIO(sample_config))
8181 >>> sorted((str(a), str(b)) for (a, b) in config.items("settings"))
8282 [('am_hero', 'True')]
8383 """
···2121Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
2222Series-version: 3
2323Patch-cc: fred
2424+Commit-cc: joe
2425Series-process-log: sort, uniq
2626+Commit-added-in: 4
2527Series-changes: 4
2628- Some changes
2729- Multi
+2
tools/patman/test/test01.txt
···4949 Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
5050 Series-version: 3
5151 Patch-cc: fred
5252+ Commit-cc: joe
5253 Series-process-log: sort, uniq
5454+ Commit-added-in: 4
5355 Series-changes: 4
5456 - Some changes
5557 - Multi
+5-2
tools/u_boot_pylib/terminal.py
···164164 global last_print_len
165165166166 if last_print_len:
167167- print('\r%s\r' % (' '* last_print_len), end='', flush=True)
168168- last_print_len = None
167167+ if print_test_mode:
168168+ print_test_list.append(PrintLine(None, None, None, None))
169169+ else:
170170+ print('\r%s\r' % (' '* last_print_len), end='', flush=True)
171171+ last_print_len = None
169172170173def set_print_test_mode(enable=True):
171174 """Go into test mode, where all printing is recorded"""
+8-3
tools/u_boot_pylib/test_util.py
···6060 prefix = ''
6161 if build_dir:
6262 prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
6363- cmd = ('%spython3-coverage run '
6464- '--omit "%s" %s %s %s %s' % (prefix, ','.join(glob_list),
6363+6464+ # Detect a Python virtualenv and use 'coverage' instead
6565+ covtool = ('python3-coverage' if sys.prefix == sys.base_prefix else
6666+ 'coverage')
6767+6868+ cmd = ('%s%s run '
6969+ '--omit "%s" %s %s %s %s' % (prefix, covtool, ','.join(glob_list),
6570 prog, extra_args or '', test_cmd,
6671 single_thread or '-P1'))
6772 os.system(cmd)
6868- stdout = command.output('python3-coverage', 'report')
7373+ stdout = command.output(covtool, 'report')
6974 lines = stdout.splitlines()
7075 if required:
7176 # Convert '/path/to/name.py' just the module name 'name'