# Linux kernel mirror (for testing)
# git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
# tags: kernel, os, linux
#!/usr/bin/env drgn
#
# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.

# Help text shown by argparse below (RawTextHelpFormatter keeps this layout).
desc = """
This is a drgn script to show the current workqueue configuration. For more
info on drgn, visit https://github.com/osandov/drgn.

Affinity Scopes
===============

Shows the CPUs that can be used for unbound workqueues and how they will be
grouped by each available affinity type. For each type:

  nr_pods   number of CPU pods in the affinity type
  pod_cpus  CPUs in each pod
  pod_node  NUMA node for memory allocation for each pod
  cpu_pod   pod that each CPU is associated to

Worker Pools
============

Lists all worker pools indexed by their ID. For each pool:

  ref       number of pool_workqueue's associated with this pool
  nice      nice value of the worker threads in the pool
  idle      number of idle workers
  workers   number of all workers
  cpu       CPU the pool is associated with (per-cpu pool)
  cpus      CPUs the workers in the pool can run on (unbound pool)

Workqueue CPU -> pool
=====================

Lists all workqueues along with their type and worker pool association. For
each workqueue:

  NAME TYPE[,FLAGS] POOL_ID...

  NAME      name of the workqueue
  TYPE      percpu, unbound or ordered
  FLAGS     S: strict affinity scope
  POOL_ID   worker pool ID associated with each possible CPU
"""
46
47import sys
48
49import drgn
50from drgn.helpers.linux.list import list_for_each_entry,list_empty
51from drgn.helpers.linux.percpu import per_cpu_ptr
52from drgn.helpers.linux.cpumask import for_each_cpu,for_each_possible_cpu
53from drgn.helpers.linux.nodemask import for_each_node
54from drgn.helpers.linux.idr import idr_for_each
55
import argparse
# The script takes no options; argparse exists so `--help` prints `desc`
# verbatim (RawTextHelpFormatter preserves the manual layout above).
parser = argparse.ArgumentParser(description=desc,
                                 formatter_class=argparse.RawTextHelpFormatter)
args = parser.parse_args()
60
def err(s):
    """Write *s* and a newline to stderr, then terminate with exit status 1."""
    sys.stderr.write(str(s) + '\n')
    sys.stderr.flush()
    sys.exit(1)
64
def cpumask_str(cpumask):
    """Render *cpumask* as space-separated 32-bit hex words, lowest word first."""
    chunks = []
    word_base = 0
    bits = 0
    for cpu in for_each_cpu(cpumask[0]):
        # Flush completed 32-bit words until cpu falls inside the current one.
        while cpu - word_base >= 32:
            # NOTE(review): intermediate words use hex()'s "0x" prefix while
            # the final word is zero-padded without it — quirk preserved as-is.
            chunks.append(f'{hex(bits)} ')
            word_base += 32
            bits = 0
        bits |= 1 << (cpu - word_base)
    if bits > 0:
        chunks.append(f'{bits:08x}')
    return ''.join(chunks).strip()
78
# Field width used when printing a workqueue's type string.
wq_type_len = 9

def wq_type_str(wq):
    """Classify *wq* by its flags and return the type name padded to wq_type_len."""
    def pad(label):
        return f'{label:{wq_type_len}}'

    flags = wq.flags
    if flags & WQ_BH:
        return pad("bh")
    if not (flags & WQ_UNBOUND):
        return pad("percpu")
    if flags & WQ_ORDERED:
        return pad("ordered")
    # Plain unbound; a ",S" suffix marks a strict affinity scope.
    return pad("unbound,S") if wq.unbound_attrs.affn_strict else pad("unbound")
94
# Kernel-side globals, resolved through the drgn program object `prog`
# (injected into the namespace by the drgn runner).
worker_pool_idr = prog['worker_pool_idr']
workqueues = prog['workqueues']
wq_unbound_cpumask = prog['wq_unbound_cpumask']
wq_pod_types = prog['wq_pod_types']
wq_affn_dfl = prog['wq_affn_dfl']
wq_affn_names = prog['wq_affn_names']

# Workqueue flag bits from the kernel image.
WQ_BH = prog['WQ_BH']
WQ_UNBOUND = prog['WQ_UNBOUND']
WQ_ORDERED = prog['__WQ_ORDERED']
WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM']

# Affinity scope types, iterated over below.
WQ_AFFN_CPU = prog['WQ_AFFN_CPU']
WQ_AFFN_SMT = prog['WQ_AFFN_SMT']
WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE']
WQ_AFFN_CACHE_SHARD = prog['WQ_AFFN_CACHE_SHARD']
WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA']
WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM']

POOL_BH = prog['POOL_BH']

WQ_NAME_LEN = prog['WQ_NAME_LEN'].value_()
# Column width of a rendered cpumask; used to size table fields later on.
cpumask_str_len = len(cpumask_str(wq_unbound_cpumask))
118
# --- Affinity Scopes section ---
print('Affinity Scopes')
print('===============')

print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}')
123
def print_pod_type(pt):
    """Dump one wq_pod_type: pod count, per-pod cpumask/node, and cpu->pod map."""
    print(f' nr_pods {pt.nr_pods.value_()}')

    # CPUs belonging to each pod.
    print(' pod_cpus', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={cpumask_str(pt.pod_cpus[pod])}', end='')
    print('')

    # NUMA node used for memory allocation for each pod.
    print(' pod_node', end='')
    for pod in range(pt.nr_pods):
        print(f' [{pod}]={pt.pod_node[pod].value_()}', end='')
    print('')

    # Reverse map: the pod each possible CPU is associated with.
    print(f' cpu_pod ', end='')
    for cpu in for_each_possible_cpu(prog):
        print(f' [{cpu}]={pt.cpu_pod[cpu].value_()}', end='')
    print('')
141
# Dump every affinity scope type, flagging the system-wide default.
for affn in [WQ_AFFN_CPU, WQ_AFFN_SMT, WQ_AFFN_CACHE, WQ_AFFN_CACHE_SHARD, WQ_AFFN_NUMA, WQ_AFFN_SYSTEM]:
    print('')
    print(f'{wq_affn_names[affn].string_().decode().upper()}{" (default)" if affn == wq_affn_dfl else ""}')
    print_pod_type(wq_pod_types[affn])
146
print('')
print('Worker Pools')
print('============')

# First pass over the pool IDR: compute column widths so IDs and refcounts
# line up in the second pass.
max_pool_id_len = 0
max_ref_len = 0
for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    max_pool_id_len = max(max_pool_id_len, len(f'{pi}'))
    max_ref_len = max(max_ref_len, len(f'{pool.refcnt.value_()}'))

# Second pass: one line per worker pool.
for pi, pool in idr_for_each(worker_pool_idr):
    pool = drgn.Object(prog, 'struct worker_pool', address=pool)
    print(f'pool[{pi:0{max_pool_id_len}}] flags=0x{pool.flags.value_():02x} ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='')
    print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
    if pool.cpu >= 0:
        # Per-CPU pool: bound to a single CPU; may additionally be a BH pool.
        print(f'cpu={pool.cpu.value_():3}', end='')
        if pool.flags & POOL_BH:
            print(' bh', end='')
    else:
        # Unbound pool: show the cpumasks its workers may run on.
        print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='')
        print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='')
        if pool.attrs.affn_strict:
            print(' strict', end='')
    print('')
172
print('')
print('Workqueue CPU -> pool')
print('=====================')

# Header row: workqueue name, type, then one column per possible CPU.
print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"type CPU":{wq_type_len}}', end='')
for cpu in for_each_possible_cpu(prog):
    print(f' {cpu:{max_pool_id_len}}', end='')
print(' dfl]')

for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} {wq_type_str(wq):10}', end='')

    # ID of the worker pool serving each possible CPU for this workqueue.
    for cpu in for_each_possible_cpu(prog):
        pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
        field_len = max(len(str(cpu)), max_pool_id_len)
        print(f' {pool_id:{field_len}}', end='')

    # Unbound workqueues additionally have a default pool_workqueue.
    if wq.flags & WQ_UNBOUND:
        print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='')
    print('')
193
print('')
print('Workqueue -> rescuer')
print('====================')

# Size the cpumask columns so the headers are never wider than the data.
ucpus_len = max(cpumask_str_len, len("unbound_cpus"))
rcpus_len = max(cpumask_str_len, len("rescuer_cpus"))

print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"unbound_cpus":{ucpus_len}} pid {"rescuer_cpus":{rcpus_len}} ]')

# Only WQ_MEM_RECLAIM workqueues have a rescuer task.
for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
    if not (wq.flags & WQ_MEM_RECLAIM):
        continue

    print(f'{wq.name.string_().decode():{WQ_NAME_LEN}}', end='')
    # unbound_attrs is a NULL pointer for per-cpu workqueues; print the
    # cpumask only when it is set.
    if wq.unbound_attrs.value_() != 0:
        print(f' {cpumask_str(wq.unbound_attrs.cpumask):{ucpus_len}}', end='')
    else:
        print(f' {"":{ucpus_len}}', end='')

    print(f' {wq.rescuer.task.pid.value_():6}', end='')
    print(f' {cpumask_str(wq.rescuer.task.cpus_ptr):{rcpus_len}}', end='')
    print('')
216
print('')
print('Unbound workqueue -> node_nr/max_active')
print('=======================================')

# node_to_cpumask_map only exists in NUMA-enabled kernel builds.
if 'node_to_cpumask_map' in prog:
    __cpu_online_mask = prog['__cpu_online_mask']
    node_to_cpumask_map = prog['node_to_cpumask_map']
    nr_node_ids = prog['nr_node_ids'].value_()

    print(f'online_cpus={cpumask_str(__cpu_online_mask.address_of_())}')
    for node in for_each_node():
        print(f'NODE[{node:02}]={cpumask_str(node_to_cpumask_map[node])}')
    print('')

    # Header: min/max, one nr/max column per node, plus the trailing default.
    print(f'[{"workqueue":^{WQ_NAME_LEN-1}} {"min":>4} {"max":>4}', end='')
    for node in for_each_node():
        print(f' {"NODE " + str(node):>9}', end='')
    print(f' {"dfl":>9} ]')
    print('')

    for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
        if not (wq.flags & WQ_UNBOUND):
            continue

        print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} ', end='')
        print(f'{wq.min_active.value_():4} {wq.max_active.value_():4}', end='')
        for node in for_each_node():
            nna = wq.node_nr_active[node]
            print(f' {f"{nna.nr.counter.value_()}/{nna.max.value_()}":>9}', end='')
        # Index nr_node_ids holds the default node_nr_active entry.
        nna = wq.node_nr_active[nr_node_ids]
        print(f' {f"{nna.nr.counter.value_()}/{nna.max.value_()}":>9}')
else:
    # Fix: this branch previously called the undefined printf(), raising
    # NameError on non-NUMA kernels; the f-string also had no placeholders.
    # Use plain print() with the same message.
    print('node_to_cpumask_map not present, is NUMA enabled?')