Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'trace-v6.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull more tracing updates from Steven Rostedt:

- Remove unneeded goto out statements

Over time, the logic was restructured but left a "goto out" where the
out label simply did a "return ret;". Instead of jumping to this out
label, simply return immediately and remove the out label.
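
For illustration, the pattern being removed looks like this (a minimal
sketch with a hypothetical do_work(), not taken from the diffs below):

        /* Before: the out label only returns */
        ret = do_work();
        if (ret)
                goto out;
        ...
 out:
        return ret;

        /* After: return directly and drop the label */
        ret = do_work();
        if (ret)
                return ret;
        ...
        return ret;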

- Add guard(ring_buffer_nest)

Some calls into the tracing ring buffer can happen while the ring buffer
is already being written to in the same context (for example, a
trace_printk() between a ring_buffer_lock_reserve() and a
ring_buffer_unlock_commit()).

To avoid triggering the recursion detection, these functions use
ring_buffer_nest_start() and ring_buffer_nest_end(). Create a guard()
for them so that call sites are simplified and no longer need a goto
for the release; see the sketch below.
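
For reference, the guard is declared once with DEFINE_GUARD() (the
include/linux/ring_buffer.h hunk below) and then used at call sites. A
minimal caller sketch, assuming a hypothetical helper reserve_event():

        DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
                     ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))

        static int write_nested_event(struct trace_buffer *buffer)
        {
                /* ring_buffer_nest_end() now runs automatically on
                 * every return path, so no goto is needed. */
                guard(ring_buffer_nest)(buffer);

                if (!reserve_event(buffer))     /* hypothetical */
                        return 0;
                /* ... fill in and commit the event ... */
                return 1;
        }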

- Clean up the tracing code with guard() and __free() logic

There were several locations that were prime candidates for using
guard() and __free() helpers. Switch them over to use them.
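
A minimal sketch of the __free() side (hypothetical function; the real
conversions are in the kernel/trace/trace.c hunks below):

        static int show_mask(size_t len)
        {
                /* kfree(mask_str) runs automatically when mask_str
                 * goes out of scope, on every return path. */
                char *mask_str __free(kfree) = kmalloc(len, GFP_KERNEL);

                if (!mask_str)
                        return -ENOMEM;

                return fill_mask(mask_str, len);        /* hypothetical */
        }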

- Fix output of function argument traces for unsigned int values

The function tracer with the "func-args" option set will record up to 6
argument registers and then use BTF to format them for human
consumption when the trace file is read. Several of these arguments are
"unsigned long" or even "unsigned int" values that are really an
address or a mask, and they are easier to understand when printed in
hexadecimal rather than decimal. The old code printed all non-pointer
values as signed integers, which made unsigned values even harder to
read.

For instance, instead of:

__local_bh_disable_ip(ip=-2127311112, cnt=256) <-handle_softirqs

show:

__local_bh_disable_ip(ip=0xffffffff8133cef8, cnt=0x100) <-handle_softirqs

* tag 'trace-v6.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing: Have unsigned int function args displayed as hexadecimal
ring-buffer: Convert ring_buffer_write() to use guard(preempt_notrace)
tracing: Use __free(kfree) in trace.c to remove gotos
tracing: Add guard() around locks and mutexes in trace.c
tracing: Add guard(ring_buffer_nest)
tracing: Remove unneeded goto out logic

+120 -200
+3
include/linux/ring_buffer.h
···
 void ring_buffer_nest_start(struct trace_buffer *buffer);
 void ring_buffer_nest_end(struct trace_buffer *buffer);
 
+DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
+            ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))
+
 struct ring_buffer_event *
 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
                  unsigned long *lost_events);
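Both guard() and __free() build on the compiler's cleanup attribute. A
simplified, userspace-compilable sketch of the underlying mechanism
(not the actual include/linux/cleanup.h macros, which add type safety,
conditional guards, and more):

        #include <stdio.h>

        static int depth;

        static void nest_start(void) { depth++; }
        static void nest_end(void)   { depth--; }

        /* Invoked by the compiler when the annotated variable leaves scope. */
        static void nest_cleanup(int *unused) { (void)unused; nest_end(); }

        #define NEST_GUARD \
                __attribute__((cleanup(nest_cleanup))) int _guard = (nest_start(), 0)

        static int do_write(int ok)
        {
                NEST_GUARD;             /* nest_end() runs on every return below */

                if (!ok)
                        return -1;      /* early return: cleanup still fires */
                return 0;
        }

        int main(void)
        {
                do_write(0);
                do_write(1);
                printf("depth after: %d\n", depth);     /* prints 0: balanced */
                return 0;
        }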
+6 -10
kernel/trace/ring_buffer.c
···
        int ret = -EBUSY;
        int cpu;
 
-       preempt_disable_notrace();
+       guard(preempt_notrace)();
 
        if (atomic_read(&buffer->record_disabled))
-               goto out;
+               return -EBUSY;
 
        cpu = raw_smp_processor_id();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
-               goto out;
+               return -EBUSY;
 
        cpu_buffer = buffer->buffers[cpu];
 
        if (atomic_read(&cpu_buffer->record_disabled))
-               goto out;
+               return -EBUSY;
 
        if (length > buffer->max_data_size)
-               goto out;
+               return -EBUSY;
 
        if (unlikely(trace_recursive_lock(cpu_buffer)))
-               goto out;
+               return -EBUSY;
 
        event = rb_reserve_next_event(buffer, cpu_buffer, length);
        if (!event)
···
 out_unlock:
        trace_recursive_unlock(cpu_buffer);
-
- out:
-       preempt_enable_notrace();
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_write);
+102 -185
kernel/trace/trace.c
···
 {
        struct trace_export *export;
 
-       preempt_disable_notrace();
+       guard(preempt_notrace)();
 
        export = rcu_dereference_raw_check(ftrace_exports_list);
        while (export) {
                trace_process_export(export, event, flag);
                export = rcu_dereference_raw_check(export->next);
        }
-
-       preempt_enable_notrace();
 }
 
 static inline void
···
        if (WARN_ON_ONCE(!export->write))
                return -1;
 
-       mutex_lock(&ftrace_export_lock);
+       guard(mutex)(&ftrace_export_lock);
 
        add_ftrace_export(&ftrace_exports_list, export);
-
-       mutex_unlock(&ftrace_export_lock);
 
        return 0;
 }
···
 int unregister_ftrace_export(struct trace_export *export)
 {
-       int ret;
-
-       mutex_lock(&ftrace_export_lock);
-
-       ret = rm_ftrace_export(&ftrace_exports_list, export);
-
-       mutex_unlock(&ftrace_export_lock);
-
-       return ret;
+       guard(mutex)(&ftrace_export_lock);
+       return rm_ftrace_export(&ftrace_exports_list, export);
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
···
        if (!this_tr)
                return;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
        __trace_array_put(this_tr);
-       mutex_unlock(&trace_types_lock);
 }
 EXPORT_SYMBOL_GPL(trace_array_put);
···
        trace_ctx = tracing_gen_ctx();
        buffer = tr->array_buffer.buffer;
-       ring_buffer_nest_start(buffer);
+       guard(ring_buffer_nest)(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            trace_ctx);
-       if (!event) {
-               size = 0;
-               goto out;
-       }
+       if (!event)
+               return 0;
 
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
···
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
- out:
-       ring_buffer_nest_end(buffer);
        return size;
 }
 EXPORT_SYMBOL_GPL(__trace_array_puts);
···
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
-       int ret = 0;
 
        if (!printk_binsafe(tr))
                return __trace_puts(ip, str, strlen(str));
···
        trace_ctx = tracing_gen_ctx();
        buffer = tr->array_buffer.buffer;
 
-       ring_buffer_nest_start(buffer);
+       guard(ring_buffer_nest)(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            trace_ctx);
        if (!event)
-               goto out;
+               return 0;
 
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
···
        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 
-       ret = 1;
- out:
-       ring_buffer_nest_end(buffer);
-       return ret;
+       return 1;
 }
 EXPORT_SYMBOL_GPL(__trace_bputs);
···
 int tracing_arm_snapshot(struct trace_array *tr)
 {
-       int ret;
-
-       mutex_lock(&trace_types_lock);
-       ret = tracing_arm_snapshot_locked(tr);
-       mutex_unlock(&trace_types_lock);
-
-       return ret;
+       guard(mutex)(&trace_types_lock);
+       return tracing_arm_snapshot_locked(tr);
 }
 
 void tracing_disarm_snapshot(struct trace_array *tr)
···
        ret = get_user(ch, ubuf++);
        if (ret)
-               goto out;
+               return ret;
 
        read++;
        cnt--;
···
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
-                               goto out;
+                               return ret;
                        read++;
                        cnt--;
                }
···
                /* only spaces were written */
                if (isspace(ch) || !ch) {
                        *ppos += read;
-                       ret = read;
-                       goto out;
+                       return read;
                }
        }
···
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
-               else {
-                       ret = -EINVAL;
-                       goto out;
-               }
+               else
+                       return -EINVAL;
+
                ret = get_user(ch, ubuf++);
                if (ret)
-                       goto out;
+                       return ret;
                read++;
                cnt--;
        }
···
                /* Make sure the parsed string always terminates with '\0'. */
                parser->buffer[parser->idx] = 0;
        } else {
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        *ppos += read;
-       ret = read;
-
- out:
-       return ret;
+       return read;
 }
 
 /* TODO add a seq_buf_to_buffer() */
···
        mutex_unlock(&trace_types_lock);
 
        if (ret || !default_bootup_tracer)
-               goto out_unlock;
+               return ret;
 
        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
-               goto out_unlock;
+               return 0;
 
        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
···
        /* disable other selftests, since this will break it. */
        disable_tracing_selftest("running a tracer");
 
- out_unlock:
-       return ret;
+       return 0;
 }
 
 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
···
 void tracing_reset_all_online_cpus(void)
 {
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
        tracing_reset_all_online_cpus_unlocked();
-       mutex_unlock(&trace_types_lock);
 }
 
 int is_tracing_stopped(void)
···
 static void tracing_start_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
-       unsigned long flags;
 
        if (tracing_disabled)
                return;
 
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       guard(raw_spinlock_irqsave)(&tr->start_lock);
        if (--tr->stop_count) {
                if (WARN_ON_ONCE(tr->stop_count < 0)) {
                        /* Someone screwed up their debugging */
                        tr->stop_count = 0;
                }
-               goto out;
+               return;
        }
 
        /* Prevent the buffers from switching */
···
 #endif
 
        arch_spin_unlock(&tr->max_lock);
-
- out:
-       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
···
 static void tracing_stop_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       guard(raw_spinlock_irqsave)(&tr->start_lock);
        if (tr->stop_count++)
-               goto out;
+               return;
 
        /* Prevent the buffers from switching */
        arch_spin_lock(&tr->max_lock);
···
 #endif
 
        arch_spin_unlock(&tr->max_lock);
-
- out:
-       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
···
                per_cpu(trace_buffered_event, cpu) = event;
 
-               preempt_disable();
-               if (cpu == smp_processor_id() &&
-                   __this_cpu_read(trace_buffered_event) !=
-                   per_cpu(trace_buffered_event, cpu))
-                       WARN_ON_ONCE(1);
-               preempt_enable();
+               scoped_guard(preempt,) {
+                       if (cpu == smp_processor_id() &&
+                           __this_cpu_read(trace_buffered_event) !=
+                           per_cpu(trace_buffered_event, cpu))
+                               WARN_ON_ONCE(1);
+               }
        }
 }
···
        skip++;
 #endif
 
-       preempt_disable_notrace();
+       guard(preempt_notrace)();
 
        stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
···
        /* Again, don't let gcc optimize things here */
        barrier();
        __this_cpu_dec(ftrace_stack_reserve);
-       preempt_enable_notrace();
-
 }
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
···
         * prevent recursion, since the user stack tracing may
         * trigger other kernel events.
         */
-       preempt_disable();
+       guard(preempt)();
        if (__this_cpu_read(user_stack_count))
-               goto out;
+               return;
 
        __this_cpu_inc(user_stack_count);
···
 out_drop_count:
        __this_cpu_dec(user_stack_count);
- out:
-       preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
···
        pause_graph_tracing();
 
        trace_ctx = tracing_gen_ctx();
-       preempt_disable_notrace();
+       guard(preempt_notrace)();
 
        tbuffer = get_trace_buf();
        if (!tbuffer) {
···
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->array_buffer.buffer;
-       ring_buffer_nest_start(buffer);
-       event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-                                           trace_ctx);
-       if (!event)
-               goto out;
-       entry = ring_buffer_event_data(event);
-       entry->ip = ip;
-       entry->fmt = fmt;
+       scoped_guard(ring_buffer_nest, buffer) {
+               event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+                                                   trace_ctx);
+               if (!event)
+                       goto out_put;
+               entry = ring_buffer_event_data(event);
+               entry->ip = ip;
+               entry->fmt = fmt;
 
-       memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-       __buffer_unlock_commit(buffer, event);
-       ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
-
- out:
-       ring_buffer_nest_end(buffer);
+               memcpy(entry->buf, tbuffer, sizeof(u32) * len);
+               __buffer_unlock_commit(buffer, event);
+               ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+       }
 out_put:
        put_trace_buf();
 
 out_nobuffer:
-       preempt_enable_notrace();
        unpause_graph_tracing();
 
        return len;
···
        pause_graph_tracing();
 
        trace_ctx = tracing_gen_ctx();
-       preempt_disable_notrace();
+       guard(preempt_notrace)();
 
 
        tbuffer = get_trace_buf();
···
        len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
        size = sizeof(*entry) + len + 1;
-       ring_buffer_nest_start(buffer);
-       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                           trace_ctx);
-       if (!event)
-               goto out;
-       entry = ring_buffer_event_data(event);
-       entry->ip = ip;
+       scoped_guard(ring_buffer_nest, buffer) {
+               event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                                   trace_ctx);
+               if (!event)
+                       goto out;
+               entry = ring_buffer_event_data(event);
+               entry->ip = ip;
 
-       memcpy(&entry->buf, tbuffer, len + 1);
-       __buffer_unlock_commit(buffer, event);
-       ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
-
+               memcpy(&entry->buf, tbuffer, len + 1);
+               __buffer_unlock_commit(buffer, event);
+               ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
+       }
 out:
-       ring_buffer_nest_end(buffer);
        put_trace_buf();
 
 out_nobuffer:
-       preempt_enable_notrace();
        unpause_graph_tracing();
 
        return len;
···
        if (ret)
                return ret;
 
-       mutex_lock(&event_mutex);
+       guard(mutex)(&event_mutex);
 
        /* Fail if the file is marked for removal */
        if (file->flags & EVENT_FILE_FL_FREED) {
                trace_array_put(file->tr);
-               ret = -ENODEV;
+               return -ENODEV;
        } else {
                event_file_get(file);
        }
-
-       mutex_unlock(&event_mutex);
-       if (ret)
-               return ret;
 
        filp->private_data = inode->i_private;
···
                             size_t count, loff_t *ppos)
 {
        struct trace_array *tr = file_inode(filp)->i_private;
-       char *mask_str;
+       char *mask_str __free(kfree) = NULL;
        int len;
 
        len = snprintf(NULL, 0, "%*pb\n",
···
        len = snprintf(mask_str, len, "%*pb\n",
                       cpumask_pr_args(tr->tracing_cpumask));
-       if (len >= count) {
-               count = -EINVAL;
-               goto out_err;
-       }
-       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+       if (len >= count)
+               return -EINVAL;
 
-out_err:
-       kfree(mask_str);
-
-       return count;
+       return simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 }
 
 int tracing_set_cpumask(struct trace_array *tr,
···
        char buf[MAX_TRACER_SIZE+2];
        int r;
 
-       mutex_lock(&trace_types_lock);
-       r = sprintf(buf, "%s\n", tr->current_trace->name);
-       mutex_unlock(&trace_types_lock);
+       scoped_guard(mutex, &trace_types_lock) {
+               r = sprintf(buf, "%s\n", tr->current_trace->name);
+       }
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
···
 {
        int ret = 0;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
 
        update_last_data(tr);
 
        if (!tr->ring_buffer_expanded)
                ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
                                                   RING_BUFFER_ALL_CPUS);
-       mutex_unlock(&trace_types_lock);
-
        return ret;
 }
···
        if (ret)
                return ret;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
        cpu = tracing_get_cpu(inode);
        ret = open_pipe_on_cpu(tr, cpu);
        if (ret)
···
        tr->trace_ref++;
 
-       mutex_unlock(&trace_types_lock);
        return ret;
 
 fail:
···
        close_pipe_on_cpu(tr, cpu);
 fail_pipe_on_cpu:
        __trace_array_put(tr);
-       mutex_unlock(&trace_types_lock);
        return ret;
 }
···
        struct trace_iterator *iter = file->private_data;
        struct trace_array *tr = inode->i_private;
 
-       mutex_lock(&trace_types_lock);
+       scoped_guard(mutex, &trace_types_lock) {
+               tr->trace_ref--;
 
-       tr->trace_ref--;
-
-       if (iter->trace->pipe_close)
-               iter->trace->pipe_close(iter);
-       close_pipe_on_cpu(tr, iter->cpu_file);
-       mutex_unlock(&trace_types_lock);
+               if (iter->trace->pipe_close)
+                       iter->trace->pipe_close(iter);
+               close_pipe_on_cpu(tr, iter->cpu_file);
+       }
 
        free_trace_iter_content(iter);
        kfree(iter);
···
        if (i == ARRAY_SIZE(trace_clocks))
                return -EINVAL;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
 
        tr->clock_id = i;
···
                tscratch->clock_id = i;
        }
-
-       mutex_unlock(&trace_types_lock);
 
        return 0;
 }
···
 {
        struct trace_array *tr = m->private;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
 
        if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
                seq_puts(m, "delta [absolute]\n");
        else
                seq_puts(m, "[delta] absolute\n");
-
-       mutex_unlock(&trace_types_lock);
 
        return 0;
 }
···
 {
        struct tracing_log_err *err, *next;
 
-       mutex_lock(&tracing_err_log_lock);
+       guard(mutex)(&tracing_err_log_lock);
+
        list_for_each_entry_safe(err, next, &tr->err_log, list) {
                list_del(&err->list);
                free_tracing_log_err(err);
        }
 
        tr->n_err_log_entries = 0;
-       mutex_unlock(&tracing_err_log_lock);
 }
 
 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
···
        struct ftrace_buffer_info *info = file->private_data;
        struct trace_iterator *iter = &info->iter;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
 
        iter->tr->trace_ref--;
···
                ring_buffer_free_read_page(iter->array_buffer->buffer,
                                           info->spare_cpu, info->spare);
        kvfree(info);
-
-       mutex_unlock(&trace_types_lock);
 
        return 0;
 }
···
         * An ioctl call with cmd 0 to the ring buffer file will wake up all
         * waiters
         */
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
 
        /* Make sure the waiters see the new wait_index */
        (void)atomic_fetch_inc_release(&iter->wait_index);
 
        ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
 
-       mutex_unlock(&trace_types_lock);
        return 0;
 }
···
 out_reg:
        ret = tracing_arm_snapshot(tr);
        if (ret < 0)
-               goto out;
+               return ret;
 
        ret = register_ftrace_function_probe(glob, tr, ops, count);
        if (ret < 0)
                tracing_disarm_snapshot(tr);
- out:
+
        return ret < 0 ? ret : 0;
 }
···
                return -EINVAL;
 
        if (!!(topt->flags->val & topt->opt->bit) != val) {
-               mutex_lock(&trace_types_lock);
+               guard(mutex)(&trace_types_lock);
                ret = __set_tracer_option(topt->tr, topt->flags,
                                          topt->opt, !val);
-               mutex_unlock(&trace_types_lock);
                if (ret)
                        return ret;
        }
···
                return ret;
 
        if (buffer) {
-               mutex_lock(&trace_types_lock);
+               guard(mutex)(&trace_types_lock);
                if (!!val == tracer_tracing_is_on(tr)) {
                        val = 0; /* do nothing */
                } else if (val) {
···
                        /* Wake up any waiters */
                        ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
                }
-               mutex_unlock(&trace_types_lock);
        }
 
        (*ppos)++;
···
 static void update_tracer_options(struct trace_array *tr)
 {
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
        tracer_options_updated = true;
        __update_tracer_options(tr);
-       mutex_unlock(&trace_types_lock);
 }
 
 /* Must have trace_types_lock held */
···
 {
        struct trace_array *tr;
 
-       mutex_lock(&trace_types_lock);
+       guard(mutex)(&trace_types_lock);
        tr = trace_array_find(instance);
        if (tr)
                tr->ref++;
-       mutex_unlock(&trace_types_lock);
 
        return tr;
 }
···
                            size_t count, loff_t *ppos,
                            int (*createfn)(const char *))
 {
-       char *kbuf, *buf, *tmp;
+       char *kbuf __free(kfree) = NULL;
+       char *buf, *tmp;
        int ret = 0;
        size_t done = 0;
        size_t size;
···
                if (size >= WRITE_BUFSIZE)
                        size = WRITE_BUFSIZE - 1;
 
-               if (copy_from_user(kbuf, buffer + done, size)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
+               if (copy_from_user(kbuf, buffer + done, size))
+                       return -EFAULT;
+
                kbuf[size] = '\0';
                buf = kbuf;
                do {
···
                                /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
                                pr_warn("Line length is too long: Should be less than %d\n",
                                        WRITE_BUFSIZE - 2);
-                               ret = -EINVAL;
-                               goto out;
+                               return -EINVAL;
                        }
                }
                done += size;
···
                        ret = createfn(buf);
                        if (ret)
-                               goto out;
+                               return ret;
                        buf += size;
 
                } while (done < count);
        }
-       ret = done;
-
- out:
-       kfree(kbuf);
-
-       return ret;
+       return done;
 }
 
 #ifdef CONFIG_TRACER_MAX_TRACE
···
        BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
 
        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
-               goto out;
+               return -ENOMEM;
 
        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
···
        free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
- out:
        return ret;
 }
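
Note the two forms used in the conversion above: guard(type)(lock) holds
the lock until the end of the enclosing scope, while
scoped_guard(type, lock) { ... } confines it to an explicit block. The
latter is used where work must continue after the unlock, as in the
current-tracer read hunk:

        scoped_guard(mutex, &trace_types_lock) {
                r = sprintf(buf, "%s\n", tr->current_trace->name);
        }
        /* trace_types_lock is already released here */
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);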
+2 -4
kernel/trace/trace_events_synth.c
···
         * is being performed within another event.
         */
        buffer = trace_file->tr->array_buffer.buffer;
-       ring_buffer_nest_start(buffer);
+       guard(ring_buffer_nest)(buffer);
 
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                           sizeof(*entry) + fields_size);
        if (!entry)
-               goto out;
+               return;
 
        for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
                val_idx = var_ref_idx[i];
···
        }
 
        trace_event_buffer_commit(&fbuffer);
- out:
-       ring_buffer_nest_end(buffer);
 }
 
 static void free_synth_event_print_fmt(struct trace_event_call *call)
+7 -1
kernel/trace/trace_output.c
···
        struct btf *btf;
        s32 tid, nr = 0;
        int a, p, x;
+       u16 encode;
 
        trace_seq_printf(s, "(");
···
                trace_seq_printf(s, "0x%lx", arg);
                break;
        case BTF_KIND_INT:
-               trace_seq_printf(s, "%ld", arg);
+               encode = btf_int_encoding(t);
+               /* Print unsigned ints as hex */
+               if (encode & BTF_INT_SIGNED)
+                       trace_seq_printf(s, "%ld", arg);
+               else
+                       trace_seq_printf(s, "0x%lx", arg);
                break;
        case BTF_KIND_ENUM:
                trace_seq_printf(s, "%ld", arg);
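
btf_int_encoding() returns the encoding flags of a BTF integer type; the
relevant bits are defined in include/uapi/linux/btf.h:

        #define BTF_INT_SIGNED  (1 << 0)
        #define BTF_INT_CHAR    (1 << 1)
        #define BTF_INT_BOOL    (1 << 2)

An argument whose BTF type lacks BTF_INT_SIGNED is now printed with
"0x%lx", which is what produces the hexadecimal ip= and cnt= values in
the commit-message example above.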