// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include <stdint.h>
#include <assert.h>
#include "internal.h"
#include <api/fs/fs.h>

#define MAX_NR_CPUS 4096

void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus;
	struct perf_cpu_map *result;

	if (nr_cpus == 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}
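
/*
 * Layout note (a sketch of one reading, not normative): the map is a single
 * allocation, a fixed header followed by a flexible array of nr_cpus
 * struct perf_cpu entries, hence:
 *
 *	malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus)
 *
 * ADD_RC_CHK()/RC_CHK_ACCESS() belong to the optional reference-count
 * checking helpers; with checking compiled out they reduce to a plain
 * pointer assignment and access, which is why RC_CHK_ACCESS() is used
 * throughout to reach the underlying struct.
 */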

struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}
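
/*
 * A single-entry map holding -1 denotes "any CPU", matching the cpu == -1
 * convention of perf_event_open(2). Minimal caller sketch (illustrative
 * only, error handling elided):
 *
 *	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
 *
 *	// perf_cpu_map__cpu(any, 0).cpu == -1
 *	perf_cpu_map__put(any);
 */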

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}
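
/*
 * Ownership follows the usual get/put pattern: every constructor and every
 * perf_cpu_map__get() must be balanced by one perf_cpu_map__put(). Sketch
 * (illustrative only):
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new_online_cpus();
 *	struct perf_cpu_map *ref = perf_cpu_map__get(map);	// refcount 2
 *
 *	perf_cpu_map__put(ref);	// refcount 1
 *	perf_cpu_map__put(map);	// refcount 0, map is freed
 */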

static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	long nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld); the CPU map will only cover the first %ld CPUs.",
			   nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		for (long i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	char *buf = NULL;
	size_t buf_len;

	if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
		cpus = perf_cpu_map__new(buf);
		free(buf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}
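
/*
 * Typical entry point for one-entry-per-online-CPU maps. Sketch
 * (illustrative; uses the perf_cpu_map__for_each_cpu() iterator from
 * <perf/cpumap.h>):
 *
 *	struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
 *	struct perf_cpu cpu;
 *	unsigned int idx;
 *
 *	perf_cpu_map__for_each_cpu(cpu, idx, online)
 *		printf("idx %u -> cpu %d\n", idx, cpu.cpu);
 *	perf_cpu_map__put(online);
 */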

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);

	if (cpus != NULL) {
		unsigned int j = 0;

		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		for (unsigned int i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}
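
/*
 * Behaviour sketch (illustrative values): trimming sorts and removes
 * duplicates, so a temporary array {3, 1, 3, 2} yields the map {1, 2, 3}
 * with nr == 3:
 *
 *	struct perf_cpu tmp[] = { {.cpu = 3}, {.cpu = 1}, {.cpu = 3}, {.cpu = 2} };
 *	struct perf_cpu_map *map = cpu_map__trim_new(4, tmp);
 *	// perf_cpu_map__nr(map) == 3; entries are 1, 2, 3
 */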

struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	unsigned int nr_cpus = 0, max_entries = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT16_MAX
		    || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (unsigned int i = 0; i < nr_cpus; i++) {
				if (tmp_cpus[i].cpu == (int16_t)start_cpu)
					goto invalid;
			}

			if (nr_cpus == max_entries) {
				max_entries += max(end_cpu - start_cpu + 1, 16UL);
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0) {
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	} else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else {
		cpus = perf_cpu_map__new_any_cpu();
	}
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
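
/*
 * The accepted cpu_list syntax is a sysfs-style list: comma-separated
 * entries, each a single CPU or an inclusive range. Illustrative results:
 *
 *	perf_cpu_map__new("0-2,4");	// {0, 1, 2, 4}
 *	perf_cpu_map__new("3,1,2");	// {1, 2, 3} (sorted by trimming)
 *	perf_cpu_map__new("");		// "any CPU" map {-1}
 *	perf_cpu_map__new(NULL);	// all online CPUs
 */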

struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;

	return cpus;
}

static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
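
/*
 * Note the NULL convention: a NULL map still reports one entry so per-CPU
 * loops run once, and NULL or out-of-range accesses yield cpu == -1.
 * Illustrative values:
 *
 *	perf_cpu_map__nr(NULL);			// 1
 *	perf_cpu_map__cpu(NULL, 0).cpu;		// -1
 *	perf_cpu_map__cpu(map, 9999).cpu;	// -1 when idx is out of range
 */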

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	if (!map)
		return true;

	return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
	return map == NULL;
}
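
/*
 * The predicates above differ subtly. With "any" the one-entry {-1} map,
 * "cpus" a real map such as {0, 1}, and NULL the empty map (illustrative
 * truth table):
 *
 *	map	has_any_or_empty	is_any_or_empty	is_empty
 *	NULL	true			true		true
 *	any	true			true		false
 *	cpus	false			false		false
 */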

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	unsigned int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}
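
/*
 * The binary search relies on maps being kept sorted (cpu_map__trim_new()
 * guarantees this). Illustrative values for the map {1, 2, 4}:
 *
 *	perf_cpu_map__idx(map, (struct perf_cpu){ .cpu = 2 });	// 1
 *	perf_cpu_map__idx(map, (struct perf_cpu){ .cpu = 3 });	// -1
 */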

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	unsigned int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (unsigned int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
{
	struct perf_cpu cpu, result = {
		.cpu = -1
	};
	unsigned int idx;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
		result = cpu;
		break;
	}
	return result;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (!map)
		return result;

	// The CPUs are always sorted and nr is always > 0, as a zero-length
	// map is encoded as NULL.
	return __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1);
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}
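
/*
 * Illustrative values: with a = {0, 1, 2, 3} and b = {1, 3},
 * perf_cpu_map__is_subset(a, b) is true (every CPU of 'b' is in 'a'),
 * while perf_cpu_map__is_subset(b, a) is false. A NULL 'b' is a subset
 * of anything.
 */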

/*
 * Merge two cpumaps.
 *
 * If 'other' is a subset of '*orig', '*orig' keeps itself with no reference
 * count change (similar to "realloc").
 *
 * If '*orig' is a subset of 'other', '*orig' reuses 'other' with its
 * reference count increased.
 *
 * Otherwise, '*orig' gets freed and replaced with a new map.
 */
int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	unsigned int tmp_len, i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(*orig, other))
		return 0;
	if (perf_cpu_map__is_subset(other, *orig)) {
		perf_cpu_map__put(*orig);
		*orig = perf_cpu_map__get(other);
		return 0;
	}

	tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return -ENOMEM;

	/* Standard merge algorithm from Wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(*orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(*orig);
	*orig = merged;
	return 0;
}
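
/*
 * Illustrative merge: with *orig = {0, 2} and other = {1, 2, 3}, neither
 * side is a subset, so *orig is replaced by a freshly built {0, 1, 2, 3}
 * and the old map is put. Sketch (error handling elided):
 *
 *	struct perf_cpu_map *a = perf_cpu_map__new("0,2");
 *	struct perf_cpu_map *b = perf_cpu_map__new("1-3");
 *
 *	perf_cpu_map__merge(&a, b);	// a == {0, 1, 2, 3}
 *	perf_cpu_map__put(a);
 *	perf_cpu_map__put(b);
 */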

struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	unsigned int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else { /* CPUs match. */
			i++;
			j++;
			k++;
		}
	}
	if (k == 0) /* Maps are completely disjoint. */
		return NULL;

	merged = perf_cpu_map__alloc(k);
	if (!merged)
		return NULL;
	/* Entries are added to merged in sorted order, so no need to sort again. */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			RC_CHK_ACCESS(merged)->map[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	return merged;
}
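
/*
 * Illustrative intersection: {0, 1, 2} and {1, 2, 4} share {1, 2}; fully
 * disjoint maps yield NULL. Sketch (error handling elided):
 *
 *	struct perf_cpu_map *a = perf_cpu_map__new("0-2");
 *	struct perf_cpu_map *b = perf_cpu_map__new("1,2,4");
 *	struct perf_cpu_map *both = perf_cpu_map__intersect(a, b);
 *
 *	// both == {1, 2}; put a, b and both when done
 */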