Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

scripts/make_fit: Compress dtbs in parallel

When there are 1500 device tree files, it takes quite a while to compress
them. Do it in parallel.

Signed-off-by: Simon Glass <sjg@chromium.org>
Link: https://patch.msgid.link/20260106162738.2605574-7-sjg@chromium.org
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
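
The speed-up described in the commit message comes from the fact that per-file
compression is CPU-bound and independent across files, so it spreads cleanly
over a process pool. A rough, self-contained sketch of the serial-vs-parallel
comparison (a hypothetical benchmark, using Python's gzip module as a stand-in
for the script's compress_data() helper and a made-up glob path into a built
arm64 tree) might look like this:

import glob
import gzip
import multiprocessing
import time


def compress_one(fname):
    """Read and gzip-compress one file, returning (fname, compressed bytes)."""
    with open(fname, 'rb') as inf:
        return fname, gzip.compress(inf.read())


def compress_all(fnames, parallel):
    """Compress every file, serially or via a process pool, and report timing."""
    start = time.monotonic()
    if parallel:
        with multiprocessing.Pool() as pool:
            results = pool.map(compress_one, fnames)
    else:
        results = [compress_one(fn) for fn in fnames]
    mode = 'parallel' if parallel else 'serial'
    print(f'{len(results)} files compressed in '
          f'{time.monotonic() - start:.1f}s ({mode})')
    return dict(results)


if __name__ == '__main__':
    # Hypothetical location; point this at a tree containing built .dtb files
    dtbs = glob.glob('arch/arm64/boot/dts/**/*.dtb', recursive=True)
    compress_all(dtbs, parallel=False)
    compress_all(dtbs, parallel=True)

The dict keyed by filename mirrors the cache the patch builds before writing
the FIT.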

Authored by Simon Glass and committed by Nathan Chancellor
c7c88b20 fcdcf22a

+50 -6
scripts/make_fit.py
···
 
 import argparse
 import collections
+import multiprocessing
 import os
 import subprocess
 import sys
···
     return comp_data


-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple: (str: fname, bytes: compressed_data)
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
     """Write out a single devicetree to the FIT
 
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
         fname (str): Filename containing the DTB
-        arch: FIT architecture, e.g. 'arm64'
+        arch (str): FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
+        data (bytes): Pre-compressed data (optional)
     """
     with fsw.add_node(f'fdt-{seq}'):
         fsw.property_string('description', os.path.basename(fname))
···
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
 
-        with open(fname, 'rb') as inf:
-            compressed = compress_data(inf, compress)
-        fsw.property('data', compressed)
+        if data is None:
+            with open(fname, 'rb') as inf:
+                data = compress_data(inf, compress)
+        fsw.property('data', data)


 def process_dtb(fname, args):
···
     """
     seq = 0
     size = 0
+
+    # First figure out the unique DTB files that need compression
+    todo = []
+    file_info = []  # List of (fname, model, compat, files) tuples
+
     for fname in args.dtbs:
         # Ignore non-DTB (*.dtb) files
         if os.path.splitext(fname)[1] != '.dtb':
···
             sys.stderr.write(f'Error processing {fname}:\n')
             raise e
 
+        file_info.append((fname, model, compat, files))
+        for fn in files:
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # unpacks each tuple, calls compress_dtb(fn, compress) in parallel
+            results = pool.starmap(compress_dtb, compress_args)
+
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using pre-compressed data
+    for fname, model, compat, files in file_info:
         for fn in files:
             if fn not in fdts:
                 seq += 1
                 size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                 fdts[fn] = seq
 
         files_seq = [fdts[fn] for fn in files]
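
One detail worth noting in the last hunk: Pool.starmap() unpacks each tuple in
compress_args before calling the worker, so ('board.dtb', 'gzip') becomes
compress_dtb('board.dtb', 'gzip'), and the results come back in input order,
which is why dict(results) safely rebuilds a filename-keyed cache. A minimal
illustration with a throwaway worker (illustrative only, not code from the
script):

import multiprocessing


def worker(fname, algo):
    # Each argument tuple is unpacked into the (fname, algo) parameters
    return fname, f'{fname} compressed with {algo}'


if __name__ == '__main__':
    jobs = [('a.dtb', 'gzip'), ('b.dtb', 'gzip'), ('c.dtb', 'gzip')]
    with multiprocessing.Pool() as pool:
        results = pool.starmap(worker, jobs)
    cache = dict(results)
    print(cache['b.dtb'])   # -> 'b.dtb compressed with gzip'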