Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

tracing: Move snapshot code out of trace.c and into trace_snapshot.c

The trace.c file was a dumping ground for most tracing code. Start
organizing it better by moving various functions out into their own files.
Move all the snapshot code, including the max trace code, into its own
trace_snapshot.c file.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20260324140145.36352d6a@gandalf.local.home
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

+1189 -1178
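For readers unfamiliar with the feature being relocated: user space drives the snapshot buffer through the tracefs snapshot file, whose semantics are spelled out in the help text moved out of trace.c below (writing 0 frees the buffer, 1 allocates it if needed and takes a snapshot, any other value clears it). A minimal user-space sketch of that interface, assuming tracefs is mounted at /sys/kernel/tracing and the kernel was built with CONFIG_TRACER_SNAPSHOT=y:

/* Minimal sketch: take a tracing snapshot from user space via tracefs.
 * Based on the snapshot help text shown in the trace.c diff below;
 * assumes tracefs at /sys/kernel/tracing and CONFIG_TRACER_SNAPSHOT=y. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0) {
		perror("open snapshot");
		return 1;
	}
	/* "1": allocate the snapshot buffer if needed, then swap it with
	 * the live buffer, freezing the current trace for later reading. */
	if (write(fd, "1", 1) != 1)
		perror("write snapshot");
	close(fd);
	return 0;
}

The frozen trace can then be read back from the same snapshot file.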
+1 -1
include/linux/ftrace.h
···
31 31    #define ARCH_SUPPORTS_FTRACE_OPS 0
32 32    #endif
33 33
34     - #ifdef CONFIG_TRACING
   34  + #ifdef CONFIG_TRACER_SNAPSHOT
35 35    extern void ftrace_boot_snapshot(void);
36 36    #else
37 37    static inline void ftrace_boot_snapshot(void) { }
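The guard change is tied to the Makefile change below: ftrace_boot_snapshot() is now defined in trace_snapshot.c, which is only compiled when CONFIG_TRACER_SNAPSHOT=y, so keying the extern declaration off CONFIG_TRACING alone would leave CONFIG_TRACING=y, CONFIG_TRACER_SNAPSHOT=n builds with an unresolved symbol. A sketch of the declare-or-stub idiom this relies on, using hypothetical names:

/* Hypothetical illustration of the declare-or-stub idiom: the real
 * my_feature_hook() lives in a file built only under CONFIG_MY_FEATURE,
 * so the header must degrade to an inline no-op when it is disabled. */
#ifdef CONFIG_MY_FEATURE
extern void my_feature_hook(void);		/* defined in my_feature.o */
#else
static inline void my_feature_hook(void) { }	/* compiles away */
#endif

Callers can then invoke the hook unconditionally, with no #ifdef at the call site.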
+1
kernel/trace/Makefile
···
69 69    obj-$(CONFIG_TRACING) += trace_stat.o
70 70    obj-$(CONFIG_TRACING) += trace_printk.o
71 71    obj-$(CONFIG_TRACING) += trace_pid.o
   72  + obj-$(CONFIG_TRACER_SNAPSHOT) += trace_snapshot.o
72 73    obj-$(CONFIG_TRACING) += pid_list.o
73 74    obj-$(CONFIG_TRACING_MAP) += tracing_map.o
74 75    obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
+23 -1169
kernel/trace/trace.c
··· 47 47 #include <linux/trace.h> 48 48 #include <linux/sched/clock.h> 49 49 #include <linux/sched/rt.h> 50 - #include <linux/fsnotify.h> 51 50 #include <linux/irq_work.h> 52 51 #include <linux/workqueue.h> 53 52 #include <linux/sort.h> ··· 218 219 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 219 220 static char *default_bootup_tracer; 220 221 221 - static bool allocate_snapshot; 222 - static bool snapshot_at_boot; 223 - 224 222 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata; 225 223 static int boot_instance_index; 226 - 227 - static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata; 228 - static int boot_snapshot_index; 229 224 230 225 static int __init set_cmdline_ftrace(char *str) 231 226 { ··· 268 275 return 1; 269 276 } 270 277 __setup("traceoff_on_warning", stop_trace_on_warning); 271 - 272 - static int __init boot_alloc_snapshot(char *str) 273 - { 274 - char *slot = boot_snapshot_info + boot_snapshot_index; 275 - int left = sizeof(boot_snapshot_info) - boot_snapshot_index; 276 - int ret; 277 - 278 - if (str[0] == '=') { 279 - str++; 280 - if (strlen(str) >= left) 281 - return -1; 282 - 283 - ret = snprintf(slot, left, "%s\t", str); 284 - boot_snapshot_index += ret; 285 - } else { 286 - allocate_snapshot = true; 287 - /* We also need the main ring buffer expanded */ 288 - trace_set_ring_buffer_expanded(NULL); 289 - } 290 - return 1; 291 - } 292 - __setup("alloc_snapshot", boot_alloc_snapshot); 293 - 294 - 295 - static int __init boot_snapshot(char *str) 296 - { 297 - snapshot_at_boot = true; 298 - boot_alloc_snapshot(str); 299 - return 1; 300 - } 301 - __setup("ftrace_boot_snapshot", boot_snapshot); 302 - 303 278 304 279 static int __init boot_instance(char *str) 305 280 { ··· 768 807 EXPORT_SYMBOL_GPL(tracing_on); 769 808 770 809 #ifdef CONFIG_TRACER_SNAPSHOT 771 - static void tracing_snapshot_instance_cond(struct trace_array *tr, 772 - void *cond_data) 773 - { 774 - unsigned long flags; 775 - 776 - if (in_nmi()) { 777 - trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); 778 - trace_array_puts(tr, "*** snapshot is being ignored ***\n"); 779 - return; 780 - } 781 - 782 - if (!tr->allocated_snapshot) { 783 - trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); 784 - trace_array_puts(tr, "*** stopping trace here! ***\n"); 785 - tracer_tracing_off(tr); 786 - return; 787 - } 788 - 789 - if (tr->mapped) { 790 - trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n"); 791 - trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); 792 - return; 793 - } 794 - 795 - /* Note, snapshot can not be used when the tracer uses it */ 796 - if (tracer_uses_snapshot(tr->current_trace)) { 797 - trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); 798 - trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); 799 - return; 800 - } 801 - 802 - local_irq_save(flags); 803 - update_max_tr(tr, current, smp_processor_id(), cond_data); 804 - local_irq_restore(flags); 805 - } 806 - 807 - void tracing_snapshot_instance(struct trace_array *tr) 808 - { 809 - tracing_snapshot_instance_cond(tr, NULL); 810 - } 811 - 812 810 /** 813 811 * tracing_snapshot - take a snapshot of the current buffer. 814 812 * ··· 791 871 EXPORT_SYMBOL_GPL(tracing_snapshot); 792 872 793 873 /** 794 - * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 
795 - * @tr: The tracing instance to snapshot 796 - * @cond_data: The data to be tested conditionally, and possibly saved 797 - * 798 - * This is the same as tracing_snapshot() except that the snapshot is 799 - * conditional - the snapshot will only happen if the 800 - * cond_snapshot.update() implementation receiving the cond_data 801 - * returns true, which means that the trace array's cond_snapshot 802 - * update() operation used the cond_data to determine whether the 803 - * snapshot should be taken, and if it was, presumably saved it along 804 - * with the snapshot. 805 - */ 806 - void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 807 - { 808 - tracing_snapshot_instance_cond(tr, cond_data); 809 - } 810 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 811 - 812 - /** 813 - * tracing_cond_snapshot_data - get the user data associated with a snapshot 814 - * @tr: The tracing instance 815 - * 816 - * When the user enables a conditional snapshot using 817 - * tracing_snapshot_cond_enable(), the user-defined cond_data is saved 818 - * with the snapshot. This accessor is used to retrieve it. 819 - * 820 - * Should not be called from cond_snapshot.update(), since it takes 821 - * the tr->max_lock lock, which the code calling 822 - * cond_snapshot.update() has already done. 823 - * 824 - * Returns the cond_data associated with the trace array's snapshot. 825 - */ 826 - void *tracing_cond_snapshot_data(struct trace_array *tr) 827 - { 828 - void *cond_data = NULL; 829 - 830 - local_irq_disable(); 831 - arch_spin_lock(&tr->max_lock); 832 - 833 - if (tr->cond_snapshot) 834 - cond_data = tr->cond_snapshot->cond_data; 835 - 836 - arch_spin_unlock(&tr->max_lock); 837 - local_irq_enable(); 838 - 839 - return cond_data; 840 - } 841 - EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 842 - 843 - static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 844 - struct array_buffer *size_buf, int cpu_id); 845 - static void set_buffer_entries(struct array_buffer *buf, unsigned long val); 846 - 847 - int tracing_alloc_snapshot_instance(struct trace_array *tr) 848 - { 849 - int order; 850 - int ret; 851 - 852 - if (!tr->allocated_snapshot) { 853 - 854 - /* Make the snapshot buffer have the same order as main buffer */ 855 - order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); 856 - ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order); 857 - if (ret < 0) 858 - return ret; 859 - 860 - /* allocate spare buffer */ 861 - ret = resize_buffer_duplicate_size(&tr->snapshot_buffer, 862 - &tr->array_buffer, RING_BUFFER_ALL_CPUS); 863 - if (ret < 0) 864 - return ret; 865 - 866 - tr->allocated_snapshot = true; 867 - } 868 - 869 - return 0; 870 - } 871 - 872 - static void free_snapshot(struct trace_array *tr) 873 - { 874 - /* 875 - * We don't free the ring buffer. instead, resize it because 876 - * The max_tr ring buffer has some state (e.g. ring->clock) and 877 - * we want preserve it. 
878 - */ 879 - ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, 0); 880 - ring_buffer_resize(tr->snapshot_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); 881 - set_buffer_entries(&tr->snapshot_buffer, 1); 882 - tracing_reset_online_cpus(&tr->snapshot_buffer); 883 - tr->allocated_snapshot = false; 884 - } 885 - 886 - static int tracing_arm_snapshot_locked(struct trace_array *tr) 887 - { 888 - int ret; 889 - 890 - lockdep_assert_held(&trace_types_lock); 891 - 892 - spin_lock(&tr->snapshot_trigger_lock); 893 - if (tr->snapshot == UINT_MAX || tr->mapped) { 894 - spin_unlock(&tr->snapshot_trigger_lock); 895 - return -EBUSY; 896 - } 897 - 898 - tr->snapshot++; 899 - spin_unlock(&tr->snapshot_trigger_lock); 900 - 901 - ret = tracing_alloc_snapshot_instance(tr); 902 - if (ret) { 903 - spin_lock(&tr->snapshot_trigger_lock); 904 - tr->snapshot--; 905 - spin_unlock(&tr->snapshot_trigger_lock); 906 - } 907 - 908 - return ret; 909 - } 910 - 911 - int tracing_arm_snapshot(struct trace_array *tr) 912 - { 913 - guard(mutex)(&trace_types_lock); 914 - return tracing_arm_snapshot_locked(tr); 915 - } 916 - 917 - void tracing_disarm_snapshot(struct trace_array *tr) 918 - { 919 - spin_lock(&tr->snapshot_trigger_lock); 920 - if (!WARN_ON(!tr->snapshot)) 921 - tr->snapshot--; 922 - spin_unlock(&tr->snapshot_trigger_lock); 923 - } 924 - 925 - /** 926 874 * tracing_alloc_snapshot - allocate snapshot buffer. 927 875 * 928 876 * This only allocates the snapshot buffer if it isn't already ··· 811 1023 return ret; 812 1024 } 813 1025 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 814 - 815 - /** 816 - * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. 817 - * 818 - * This is similar to tracing_snapshot(), but it will allocate the 819 - * snapshot buffer if it isn't already allocated. Use this only 820 - * where it is safe to sleep, as the allocation may sleep. 821 - * 822 - * This causes a swap between the snapshot buffer and the current live 823 - * tracing buffer. You can use this to take snapshots of the live 824 - * trace when some condition is triggered, but continue to trace. 825 - */ 826 - void tracing_snapshot_alloc(void) 827 - { 828 - int ret; 829 - 830 - ret = tracing_alloc_snapshot(); 831 - if (ret < 0) 832 - return; 833 - 834 - tracing_snapshot(); 835 - } 836 - EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 837 - 838 - /** 839 - * tracing_snapshot_cond_enable - enable conditional snapshot for an instance 840 - * @tr: The tracing instance 841 - * @cond_data: User data to associate with the snapshot 842 - * @update: Implementation of the cond_snapshot update function 843 - * 844 - * Check whether the conditional snapshot for the given instance has 845 - * already been enabled, or if the current tracer is already using a 846 - * snapshot; if so, return -EBUSY, else create a cond_snapshot and 847 - * save the cond_data and update function inside. 848 - * 849 - * Returns 0 if successful, error otherwise. 
850 - */ 851 - int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, 852 - cond_update_fn_t update) 853 - { 854 - struct cond_snapshot *cond_snapshot __free(kfree) = 855 - kzalloc_obj(*cond_snapshot); 856 - int ret; 857 - 858 - if (!cond_snapshot) 859 - return -ENOMEM; 860 - 861 - cond_snapshot->cond_data = cond_data; 862 - cond_snapshot->update = update; 863 - 864 - guard(mutex)(&trace_types_lock); 865 - 866 - if (tracer_uses_snapshot(tr->current_trace)) 867 - return -EBUSY; 868 - 869 - /* 870 - * The cond_snapshot can only change to NULL without the 871 - * trace_types_lock. We don't care if we race with it going 872 - * to NULL, but we want to make sure that it's not set to 873 - * something other than NULL when we get here, which we can 874 - * do safely with only holding the trace_types_lock and not 875 - * having to take the max_lock. 876 - */ 877 - if (tr->cond_snapshot) 878 - return -EBUSY; 879 - 880 - ret = tracing_arm_snapshot_locked(tr); 881 - if (ret) 882 - return ret; 883 - 884 - local_irq_disable(); 885 - arch_spin_lock(&tr->max_lock); 886 - tr->cond_snapshot = no_free_ptr(cond_snapshot); 887 - arch_spin_unlock(&tr->max_lock); 888 - local_irq_enable(); 889 - 890 - return 0; 891 - } 892 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 893 - 894 - /** 895 - * tracing_snapshot_cond_disable - disable conditional snapshot for an instance 896 - * @tr: The tracing instance 897 - * 898 - * Check whether the conditional snapshot for the given instance is 899 - * enabled; if so, free the cond_snapshot associated with it, 900 - * otherwise return -EINVAL. 901 - * 902 - * Returns 0 if successful, error otherwise. 903 - */ 904 - int tracing_snapshot_cond_disable(struct trace_array *tr) 905 - { 906 - int ret = 0; 907 - 908 - local_irq_disable(); 909 - arch_spin_lock(&tr->max_lock); 910 - 911 - if (!tr->cond_snapshot) 912 - ret = -EINVAL; 913 - else { 914 - kfree(tr->cond_snapshot); 915 - tr->cond_snapshot = NULL; 916 - } 917 - 918 - arch_spin_unlock(&tr->max_lock); 919 - local_irq_enable(); 920 - 921 - tracing_disarm_snapshot(tr); 922 - 923 - return ret; 924 - } 925 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 926 1026 #else 927 1027 void tracing_snapshot(void) 928 1028 { 929 1029 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); 930 1030 } 931 1031 EXPORT_SYMBOL_GPL(tracing_snapshot); 932 - void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 933 - { 934 - WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); 935 - } 936 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 937 1032 int tracing_alloc_snapshot(void) 938 1033 { 939 1034 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); ··· 829 1158 tracing_snapshot(); 830 1159 } 831 1160 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 832 - void *tracing_cond_snapshot_data(struct trace_array *tr) 833 - { 834 - return NULL; 835 - } 836 - EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 837 - int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) 838 - { 839 - return -ENODEV; 840 - } 841 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 842 - int tracing_snapshot_cond_disable(struct trace_array *tr) 843 - { 844 - return false; 845 - } 846 - EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 847 - #define free_snapshot(tr) do { } while (0) 848 - #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; }) 849 1161 #endif /* CONFIG_TRACER_SNAPSHOT */ 850 1162 851 1163 void tracer_tracing_off(struct 
trace_array *tr) ··· 1141 1487 1142 1488 unsigned long __read_mostly tracing_thresh; 1143 1489 1144 - #ifdef CONFIG_TRACER_MAX_TRACE 1145 - #ifdef LATENCY_FS_NOTIFY 1146 - static struct workqueue_struct *fsnotify_wq; 1147 - 1148 - static void latency_fsnotify_workfn(struct work_struct *work) 1149 - { 1150 - struct trace_array *tr = container_of(work, struct trace_array, 1151 - fsnotify_work); 1152 - fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); 1153 - } 1154 - 1155 - static void latency_fsnotify_workfn_irq(struct irq_work *iwork) 1156 - { 1157 - struct trace_array *tr = container_of(iwork, struct trace_array, 1158 - fsnotify_irqwork); 1159 - queue_work(fsnotify_wq, &tr->fsnotify_work); 1160 - } 1161 - 1162 - __init static int latency_fsnotify_init(void) 1163 - { 1164 - fsnotify_wq = alloc_workqueue("tr_max_lat_wq", 1165 - WQ_UNBOUND | WQ_HIGHPRI, 0); 1166 - if (!fsnotify_wq) { 1167 - pr_err("Unable to allocate tr_max_lat_wq\n"); 1168 - return -ENOMEM; 1169 - } 1170 - return 0; 1171 - } 1172 - 1173 - late_initcall_sync(latency_fsnotify_init); 1174 - 1175 - void latency_fsnotify(struct trace_array *tr) 1176 - { 1177 - if (!fsnotify_wq) 1178 - return; 1179 - /* 1180 - * We cannot call queue_work(&tr->fsnotify_work) from here because it's 1181 - * possible that we are called from __schedule() or do_idle(), which 1182 - * could cause a deadlock. 1183 - */ 1184 - irq_work_queue(&tr->fsnotify_irqwork); 1185 - } 1186 - #endif /* !LATENCY_FS_NOTIFY */ 1187 - 1188 - static const struct file_operations tracing_max_lat_fops; 1189 - 1190 - static void trace_create_maxlat_file(struct trace_array *tr, 1191 - struct dentry *d_tracer) 1192 - { 1193 - #ifdef LATENCY_FS_NOTIFY 1194 - INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); 1195 - init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); 1196 - #endif 1197 - tr->d_max_latency = trace_create_file("tracing_max_latency", 1198 - TRACE_MODE_WRITE, 1199 - d_tracer, tr, 1200 - &tracing_max_lat_fops); 1201 - } 1202 - 1203 - /* 1204 - * Copy the new maximum trace into the separate maximum-trace 1205 - * structure. (this way the maximum trace is permanently saved, 1206 - * for later retrieval via /sys/kernel/tracing/tracing_max_latency) 1207 - */ 1208 - static void 1209 - __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 1210 - { 1211 - struct array_buffer *trace_buf = &tr->array_buffer; 1212 - struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); 1213 - struct array_buffer *max_buf = &tr->snapshot_buffer; 1214 - struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); 1215 - 1216 - max_buf->cpu = cpu; 1217 - max_buf->time_start = data->preempt_timestamp; 1218 - 1219 - max_data->saved_latency = tr->max_latency; 1220 - max_data->critical_start = data->critical_start; 1221 - max_data->critical_end = data->critical_end; 1222 - 1223 - strscpy(max_data->comm, tsk->comm); 1224 - max_data->pid = tsk->pid; 1225 - /* 1226 - * If tsk == current, then use current_uid(), as that does not use 1227 - * RCU. The irq tracer can be called out of RCU scope. 
1228 - */ 1229 - if (tsk == current) 1230 - max_data->uid = current_uid(); 1231 - else 1232 - max_data->uid = task_uid(tsk); 1233 - 1234 - max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 1235 - max_data->policy = tsk->policy; 1236 - max_data->rt_priority = tsk->rt_priority; 1237 - 1238 - /* record this tasks comm */ 1239 - tracing_record_cmdline(tsk); 1240 - latency_fsnotify(tr); 1241 - } 1242 - #else 1243 - static inline void trace_create_maxlat_file(struct trace_array *tr, 1244 - struct dentry *d_tracer) { } 1245 - static inline void __update_max_tr(struct trace_array *tr, 1246 - struct task_struct *tsk, int cpu) { } 1247 - #endif /* CONFIG_TRACER_MAX_TRACE */ 1248 - 1249 - #ifdef CONFIG_TRACER_SNAPSHOT 1250 - /** 1251 - * update_max_tr - snapshot all trace buffers from global_trace to max_tr 1252 - * @tr: tracer 1253 - * @tsk: the task with the latency 1254 - * @cpu: The cpu that initiated the trace. 1255 - * @cond_data: User data associated with a conditional snapshot 1256 - * 1257 - * Flip the buffers between the @tr and the max_tr and record information 1258 - * about which task was the cause of this latency. 1259 - */ 1260 - void 1261 - update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, 1262 - void *cond_data) 1263 - { 1264 - if (tr->stop_count) 1265 - return; 1266 - 1267 - WARN_ON_ONCE(!irqs_disabled()); 1268 - 1269 - if (!tr->allocated_snapshot) { 1270 - /* Only the nop tracer should hit this when disabling */ 1271 - WARN_ON_ONCE(tr->current_trace != &nop_trace); 1272 - return; 1273 - } 1274 - 1275 - arch_spin_lock(&tr->max_lock); 1276 - 1277 - /* Inherit the recordable setting from array_buffer */ 1278 - if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) 1279 - ring_buffer_record_on(tr->snapshot_buffer.buffer); 1280 - else 1281 - ring_buffer_record_off(tr->snapshot_buffer.buffer); 1282 - 1283 - if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { 1284 - arch_spin_unlock(&tr->max_lock); 1285 - return; 1286 - } 1287 - 1288 - swap(tr->array_buffer.buffer, tr->snapshot_buffer.buffer); 1289 - 1290 - __update_max_tr(tr, tsk, cpu); 1291 - 1292 - arch_spin_unlock(&tr->max_lock); 1293 - 1294 - /* Any waiters on the old snapshot buffer need to wake up */ 1295 - ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS); 1296 - } 1297 - 1298 - /** 1299 - * update_max_tr_single - only copy one trace over, and reset the rest 1300 - * @tr: tracer 1301 - * @tsk: task with the latency 1302 - * @cpu: the cpu of the buffer to copy. 1303 - * 1304 - * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1305 - */ 1306 - void 1307 - update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 1308 - { 1309 - int ret; 1310 - 1311 - if (tr->stop_count) 1312 - return; 1313 - 1314 - WARN_ON_ONCE(!irqs_disabled()); 1315 - if (!tr->allocated_snapshot) { 1316 - /* Only the nop tracer should hit this when disabling */ 1317 - WARN_ON_ONCE(tr->current_trace != &nop_trace); 1318 - return; 1319 - } 1320 - 1321 - arch_spin_lock(&tr->max_lock); 1322 - 1323 - ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu); 1324 - 1325 - if (ret == -EBUSY) { 1326 - /* 1327 - * We failed to swap the buffer due to a commit taking 1328 - * place on this CPU. We fail to record, but we reset 1329 - * the max trace buffer (no one writes directly to it) 1330 - * and flag that it failed. 1331 - * Another reason is resize is in progress. 
1332 - */ 1333 - trace_array_printk_buf(tr->snapshot_buffer.buffer, _THIS_IP_, 1334 - "Failed to swap buffers due to commit or resize in progress\n"); 1335 - } 1336 - 1337 - WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 1338 - 1339 - __update_max_tr(tr, tsk, cpu); 1340 - arch_spin_unlock(&tr->max_lock); 1341 - } 1342 - #endif /* CONFIG_TRACER_SNAPSHOT */ 1343 - 1344 1490 struct pipe_wait { 1345 1491 struct trace_iterator *iter; 1346 1492 int wait_index; ··· 1449 1995 return 0; 1450 1996 } 1451 1997 1452 - static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 1998 + void tracing_reset_cpu(struct array_buffer *buf, int cpu) 1453 1999 { 1454 2000 struct trace_buffer *buffer = buf->buffer; 1455 2001 ··· 3214 3760 "# MAY BE MISSING FUNCTION EVENTS\n"); 3215 3761 } 3216 3762 3217 - #ifdef CONFIG_TRACER_SNAPSHOT 3218 - static void show_snapshot_main_help(struct seq_file *m) 3219 - { 3220 - seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 3221 - "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 3222 - "# Takes a snapshot of the main buffer.\n" 3223 - "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 3224 - "# (Doesn't have to be '2' works with any number that\n" 3225 - "# is not a '0' or '1')\n"); 3226 - } 3227 - 3228 - static void show_snapshot_percpu_help(struct seq_file *m) 3229 - { 3230 - seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 3231 - #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3232 - seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 3233 - "# Takes a snapshot of the main buffer for this cpu.\n"); 3234 - #else 3235 - seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 3236 - "# Must use main snapshot file to allocate.\n"); 3237 - #endif 3238 - seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 3239 - "# (Doesn't have to be '2' works with any number that\n" 3240 - "# is not a '0' or '1')\n"); 3241 - } 3242 - 3243 - static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 3244 - { 3245 - if (iter->tr->allocated_snapshot) 3246 - seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 3247 - else 3248 - seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 3249 - 3250 - seq_puts(m, "# Snapshot commands:\n"); 3251 - if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 3252 - show_snapshot_main_help(m); 3253 - else 3254 - show_snapshot_percpu_help(m); 3255 - } 3256 - #else 3257 - /* Should never be called */ 3258 - static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 3259 - #endif 3260 - 3261 3763 static int s_show(struct seq_file *m, void *v) 3262 3764 { 3263 3765 struct trace_iterator *iter = v; ··· 3262 3852 return 0; 3263 3853 } 3264 3854 3265 - /* 3266 - * Should be used after trace_array_get(), trace_types_lock 3267 - * ensures that i_cdev was already initialized. 
3268 - */ 3269 - static inline int tracing_get_cpu(struct inode *inode) 3270 - { 3271 - if (inode->i_cdev) /* See trace_create_cpu_file() */ 3272 - return (long)inode->i_cdev - 1; 3273 - return RING_BUFFER_ALL_CPUS; 3274 - } 3275 - 3276 3855 static const struct seq_operations tracer_seq_ops = { 3277 3856 .start = s_start, 3278 3857 .next = s_next, ··· 3288 3889 free_cpumask_var(iter->started); 3289 3890 } 3290 3891 3291 - static struct trace_iterator * 3892 + struct trace_iterator * 3292 3893 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 3293 3894 { 3294 3895 struct trace_array *tr = inode->i_private; ··· 3468 4069 return single_release(inode, filp); 3469 4070 } 3470 4071 3471 - static int tracing_release(struct inode *inode, struct file *file) 4072 + int tracing_release(struct inode *inode, struct file *file) 3472 4073 { 3473 4074 struct trace_array *tr = inode->i_private; 3474 4075 struct seq_file *m = file->private_data; ··· 4619 5220 return t->init(tr); 4620 5221 } 4621 5222 4622 - static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 5223 + void trace_set_buffer_entries(struct array_buffer *buf, unsigned long val) 4623 5224 { 4624 5225 int cpu; 4625 5226 ··· 4630 5231 static void update_buffer_entries(struct array_buffer *buf, int cpu) 4631 5232 { 4632 5233 if (cpu == RING_BUFFER_ALL_CPUS) { 4633 - set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 5234 + trace_set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 4634 5235 } else { 4635 5236 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 4636 5237 } 4637 5238 } 4638 - 4639 - #ifdef CONFIG_TRACER_SNAPSHOT 4640 - /* resize @tr's buffer to the size of @size_tr's entries */ 4641 - static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 4642 - struct array_buffer *size_buf, int cpu_id) 4643 - { 4644 - int cpu, ret = 0; 4645 - 4646 - if (cpu_id == RING_BUFFER_ALL_CPUS) { 4647 - for_each_tracing_cpu(cpu) { 4648 - ret = ring_buffer_resize(trace_buf->buffer, 4649 - per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 4650 - if (ret < 0) 4651 - break; 4652 - per_cpu_ptr(trace_buf->data, cpu)->entries = 4653 - per_cpu_ptr(size_buf->data, cpu)->entries; 4654 - } 4655 - } else { 4656 - ret = ring_buffer_resize(trace_buf->buffer, 4657 - per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 4658 - if (ret == 0) 4659 - per_cpu_ptr(trace_buf->data, cpu_id)->entries = 4660 - per_cpu_ptr(size_buf->data, cpu_id)->entries; 4661 - } 4662 - 4663 - return ret; 4664 - } 4665 - #endif /* CONFIG_TRACER_SNAPSHOT */ 4666 5239 4667 5240 static int __tracing_resize_ring_buffer(struct trace_array *tr, 4668 5241 unsigned long size, int cpu) ··· 5054 5683 return ret; 5055 5684 } 5056 5685 5057 - static ssize_t 5058 - tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 5059 - size_t cnt, loff_t *ppos) 5686 + ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 5687 + size_t cnt, loff_t *ppos) 5060 5688 { 5061 5689 char buf[64]; 5062 5690 int r; ··· 5067 5697 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5068 5698 } 5069 5699 5070 - static ssize_t 5071 - tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 5072 - size_t cnt, loff_t *ppos) 5700 + ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 5701 + size_t cnt, loff_t *ppos) 5073 5702 { 5074 5703 unsigned long val; 5075 5704 int ret; ··· 5109 5740 5110 5741 return cnt; 5111 5742 } 5112 - 5113 - #ifdef CONFIG_TRACER_MAX_TRACE 5114 - 5115 - static ssize_t 
5116 - tracing_max_lat_read(struct file *filp, char __user *ubuf, 5117 - size_t cnt, loff_t *ppos) 5118 - { 5119 - struct trace_array *tr = filp->private_data; 5120 - 5121 - return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); 5122 - } 5123 - 5124 - static ssize_t 5125 - tracing_max_lat_write(struct file *filp, const char __user *ubuf, 5126 - size_t cnt, loff_t *ppos) 5127 - { 5128 - struct trace_array *tr = filp->private_data; 5129 - 5130 - return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); 5131 - } 5132 - 5133 - #endif 5134 5743 5135 5744 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 5136 5745 { ··· 6487 7140 return ring_buffer_event_time_stamp(buffer, rbe); 6488 7141 } 6489 7142 6490 - struct ftrace_buffer_info { 6491 - struct trace_iterator iter; 6492 - void *spare; 6493 - unsigned int spare_cpu; 6494 - unsigned int spare_size; 6495 - unsigned int read; 6496 - }; 6497 - 6498 - #ifdef CONFIG_TRACER_SNAPSHOT 6499 - static int tracing_snapshot_open(struct inode *inode, struct file *file) 6500 - { 6501 - struct trace_array *tr = inode->i_private; 6502 - struct trace_iterator *iter; 6503 - struct seq_file *m; 6504 - int ret; 6505 - 6506 - ret = tracing_check_open_get_tr(tr); 6507 - if (ret) 6508 - return ret; 6509 - 6510 - if (file->f_mode & FMODE_READ) { 6511 - iter = __tracing_open(inode, file, true); 6512 - if (IS_ERR(iter)) 6513 - ret = PTR_ERR(iter); 6514 - } else { 6515 - /* Writes still need the seq_file to hold the private data */ 6516 - ret = -ENOMEM; 6517 - m = kzalloc_obj(*m); 6518 - if (!m) 6519 - goto out; 6520 - iter = kzalloc_obj(*iter); 6521 - if (!iter) { 6522 - kfree(m); 6523 - goto out; 6524 - } 6525 - ret = 0; 6526 - 6527 - iter->tr = tr; 6528 - iter->array_buffer = &tr->snapshot_buffer; 6529 - iter->cpu_file = tracing_get_cpu(inode); 6530 - m->private = iter; 6531 - file->private_data = m; 6532 - } 6533 - out: 6534 - if (ret < 0) 6535 - trace_array_put(tr); 6536 - 6537 - return ret; 6538 - } 6539 - 6540 - static void tracing_swap_cpu_buffer(void *tr) 6541 - { 6542 - update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 6543 - } 6544 - 6545 - static ssize_t 6546 - tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 6547 - loff_t *ppos) 6548 - { 6549 - struct seq_file *m = filp->private_data; 6550 - struct trace_iterator *iter = m->private; 6551 - struct trace_array *tr = iter->tr; 6552 - unsigned long val; 6553 - int ret; 6554 - 6555 - ret = tracing_update_buffers(tr); 6556 - if (ret < 0) 6557 - return ret; 6558 - 6559 - ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6560 - if (ret) 6561 - return ret; 6562 - 6563 - guard(mutex)(&trace_types_lock); 6564 - 6565 - if (tracer_uses_snapshot(tr->current_trace)) 6566 - return -EBUSY; 6567 - 6568 - local_irq_disable(); 6569 - arch_spin_lock(&tr->max_lock); 6570 - if (tr->cond_snapshot) 6571 - ret = -EBUSY; 6572 - arch_spin_unlock(&tr->max_lock); 6573 - local_irq_enable(); 6574 - if (ret) 6575 - return ret; 6576 - 6577 - switch (val) { 6578 - case 0: 6579 - if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 6580 - return -EINVAL; 6581 - if (tr->allocated_snapshot) 6582 - free_snapshot(tr); 6583 - break; 6584 - case 1: 6585 - /* Only allow per-cpu swap if the ring buffer supports it */ 6586 - #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 6587 - if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 6588 - return -EINVAL; 6589 - #endif 6590 - if (tr->allocated_snapshot) 6591 - ret = resize_buffer_duplicate_size(&tr->snapshot_buffer, 6592 - &tr->array_buffer, 
iter->cpu_file); 6593 - 6594 - ret = tracing_arm_snapshot_locked(tr); 6595 - if (ret) 6596 - return ret; 6597 - 6598 - /* Now, we're going to swap */ 6599 - if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 6600 - local_irq_disable(); 6601 - update_max_tr(tr, current, smp_processor_id(), NULL); 6602 - local_irq_enable(); 6603 - } else { 6604 - smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 6605 - (void *)tr, 1); 6606 - } 6607 - tracing_disarm_snapshot(tr); 6608 - break; 6609 - default: 6610 - if (tr->allocated_snapshot) { 6611 - if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 6612 - tracing_reset_online_cpus(&tr->snapshot_buffer); 6613 - else 6614 - tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file); 6615 - } 6616 - break; 6617 - } 6618 - 6619 - if (ret >= 0) { 6620 - *ppos += cnt; 6621 - ret = cnt; 6622 - } 6623 - 6624 - return ret; 6625 - } 6626 - 6627 - static int tracing_snapshot_release(struct inode *inode, struct file *file) 6628 - { 6629 - struct seq_file *m = file->private_data; 6630 - int ret; 6631 - 6632 - ret = tracing_release(inode, file); 6633 - 6634 - if (file->f_mode & FMODE_READ) 6635 - return ret; 6636 - 6637 - /* If write only, the seq_file is just a stub */ 6638 - if (m) 6639 - kfree(m->private); 6640 - kfree(m); 6641 - 6642 - return 0; 6643 - } 6644 - 6645 - static int tracing_buffers_open(struct inode *inode, struct file *filp); 6646 - static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 6647 - size_t count, loff_t *ppos); 6648 - static int tracing_buffers_release(struct inode *inode, struct file *file); 6649 - static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 6650 - struct pipe_inode_info *pipe, size_t len, unsigned int flags); 6651 - 6652 - static int snapshot_raw_open(struct inode *inode, struct file *filp) 6653 - { 6654 - struct ftrace_buffer_info *info; 6655 - int ret; 6656 - 6657 - /* The following checks for tracefs lockdown */ 6658 - ret = tracing_buffers_open(inode, filp); 6659 - if (ret < 0) 6660 - return ret; 6661 - 6662 - info = filp->private_data; 6663 - 6664 - if (tracer_uses_snapshot(info->iter.trace)) { 6665 - tracing_buffers_release(inode, filp); 6666 - return -EBUSY; 6667 - } 6668 - 6669 - info->iter.snapshot = true; 6670 - info->iter.array_buffer = &info->iter.tr->snapshot_buffer; 6671 - 6672 - return ret; 6673 - } 6674 - 6675 - #endif /* CONFIG_TRACER_SNAPSHOT */ 6676 - 6677 - 6678 7143 static const struct file_operations tracing_thresh_fops = { 6679 7144 .open = tracing_open_generic, 6680 7145 .read = tracing_thresh_read, 6681 7146 .write = tracing_thresh_write, 6682 7147 .llseek = generic_file_llseek, 6683 7148 }; 6684 - 6685 - #ifdef CONFIG_TRACER_MAX_TRACE 6686 - static const struct file_operations tracing_max_lat_fops = { 6687 - .open = tracing_open_generic_tr, 6688 - .read = tracing_max_lat_read, 6689 - .write = tracing_max_lat_write, 6690 - .llseek = generic_file_llseek, 6691 - .release = tracing_release_generic_tr, 6692 - }; 6693 - #endif 6694 7149 6695 7150 static const struct file_operations set_tracer_fops = { 6696 7151 .open = tracing_open_generic_tr, ··· 6579 7430 .llseek = seq_lseek, 6580 7431 .release = tracing_seq_release, 6581 7432 }; 6582 - 6583 - #ifdef CONFIG_TRACER_SNAPSHOT 6584 - static const struct file_operations snapshot_fops = { 6585 - .open = tracing_snapshot_open, 6586 - .read = seq_read, 6587 - .write = tracing_snapshot_write, 6588 - .llseek = tracing_lseek, 6589 - .release = tracing_snapshot_release, 6590 - }; 6591 - 6592 - static const struct 
file_operations snapshot_raw_fops = { 6593 - .open = snapshot_raw_open, 6594 - .read = tracing_buffers_read, 6595 - .release = tracing_buffers_release, 6596 - .splice_read = tracing_buffers_splice_read, 6597 - }; 6598 - 6599 - #endif /* CONFIG_TRACER_SNAPSHOT */ 6600 7433 6601 7434 /* 6602 7435 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct ··· 6939 7808 .release = tracing_err_log_release, 6940 7809 }; 6941 7810 6942 - static int tracing_buffers_open(struct inode *inode, struct file *filp) 7811 + int tracing_buffers_open(struct inode *inode, struct file *filp) 6943 7812 { 6944 7813 struct trace_array *tr = inode->i_private; 6945 7814 struct ftrace_buffer_info *info; ··· 6987 7856 return trace_poll(iter, filp, poll_table); 6988 7857 } 6989 7858 6990 - static ssize_t 6991 - tracing_buffers_read(struct file *filp, char __user *ubuf, 6992 - size_t count, loff_t *ppos) 7859 + ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7860 + size_t count, loff_t *ppos) 6993 7861 { 6994 7862 struct ftrace_buffer_info *info = filp->private_data; 6995 7863 struct trace_iterator *iter = &info->iter; ··· 7089 7959 return 0; 7090 7960 } 7091 7961 7092 - static int tracing_buffers_release(struct inode *inode, struct file *file) 7962 + int tracing_buffers_release(struct inode *inode, struct file *file) 7093 7963 { 7094 7964 struct ftrace_buffer_info *info = file->private_data; 7095 7965 struct trace_iterator *iter = &info->iter; ··· 7163 8033 spd->partial[i].private = 0; 7164 8034 } 7165 8035 7166 - static ssize_t 7167 - tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7168 - struct pipe_inode_info *pipe, size_t len, 7169 - unsigned int flags) 8036 + ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 8037 + struct pipe_inode_info *pipe, size_t len, 8038 + unsigned int flags) 7170 8039 { 7171 8040 struct ftrace_buffer_info *info = file->private_data; 7172 8041 struct trace_iterator *iter = &info->iter; ··· 7318 8189 7319 8190 return 0; 7320 8191 } 7321 - 7322 - #ifdef CONFIG_TRACER_SNAPSHOT 7323 - static int get_snapshot_map(struct trace_array *tr) 7324 - { 7325 - int err = 0; 7326 - 7327 - /* 7328 - * Called with mmap_lock held. lockdep would be unhappy if we would now 7329 - * take trace_types_lock. Instead use the specific 7330 - * snapshot_trigger_lock. 
7331 - */ 7332 - spin_lock(&tr->snapshot_trigger_lock); 7333 - 7334 - if (tr->snapshot || tr->mapped == UINT_MAX) 7335 - err = -EBUSY; 7336 - else 7337 - tr->mapped++; 7338 - 7339 - spin_unlock(&tr->snapshot_trigger_lock); 7340 - 7341 - /* Wait for update_max_tr() to observe iter->tr->mapped */ 7342 - if (tr->mapped == 1) 7343 - synchronize_rcu(); 7344 - 7345 - return err; 7346 - 7347 - } 7348 - static void put_snapshot_map(struct trace_array *tr) 7349 - { 7350 - spin_lock(&tr->snapshot_trigger_lock); 7351 - if (!WARN_ON(!tr->mapped)) 7352 - tr->mapped--; 7353 - spin_unlock(&tr->snapshot_trigger_lock); 7354 - } 7355 - #else 7356 - static inline int get_snapshot_map(struct trace_array *tr) { return 0; } 7357 - static inline void put_snapshot_map(struct trace_array *tr) { } 7358 - #endif 7359 8192 7360 8193 /* 7361 8194 * This is called when a VMA is duplicated (e.g., on fork()) to increment ··· 7498 8407 .llseek = generic_file_llseek, 7499 8408 }; 7500 8409 #endif /* CONFIG_DYNAMIC_FTRACE */ 7501 - 7502 - #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 7503 - static void 7504 - ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 7505 - struct trace_array *tr, struct ftrace_probe_ops *ops, 7506 - void *data) 7507 - { 7508 - tracing_snapshot_instance(tr); 7509 - } 7510 - 7511 - static void 7512 - ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 7513 - struct trace_array *tr, struct ftrace_probe_ops *ops, 7514 - void *data) 7515 - { 7516 - struct ftrace_func_mapper *mapper = data; 7517 - long *count = NULL; 7518 - 7519 - if (mapper) 7520 - count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 7521 - 7522 - if (count) { 7523 - 7524 - if (*count <= 0) 7525 - return; 7526 - 7527 - (*count)--; 7528 - } 7529 - 7530 - tracing_snapshot_instance(tr); 7531 - } 7532 - 7533 - static int 7534 - ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 7535 - struct ftrace_probe_ops *ops, void *data) 7536 - { 7537 - struct ftrace_func_mapper *mapper = data; 7538 - long *count = NULL; 7539 - 7540 - seq_printf(m, "%ps:", (void *)ip); 7541 - 7542 - seq_puts(m, "snapshot"); 7543 - 7544 - if (mapper) 7545 - count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 7546 - 7547 - if (count) 7548 - seq_printf(m, ":count=%ld\n", *count); 7549 - else 7550 - seq_puts(m, ":unlimited\n"); 7551 - 7552 - return 0; 7553 - } 7554 - 7555 - static int 7556 - ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 7557 - unsigned long ip, void *init_data, void **data) 7558 - { 7559 - struct ftrace_func_mapper *mapper = *data; 7560 - 7561 - if (!mapper) { 7562 - mapper = allocate_ftrace_func_mapper(); 7563 - if (!mapper) 7564 - return -ENOMEM; 7565 - *data = mapper; 7566 - } 7567 - 7568 - return ftrace_func_mapper_add_ip(mapper, ip, init_data); 7569 - } 7570 - 7571 - static void 7572 - ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 7573 - unsigned long ip, void *data) 7574 - { 7575 - struct ftrace_func_mapper *mapper = data; 7576 - 7577 - if (!ip) { 7578 - if (!mapper) 7579 - return; 7580 - free_ftrace_func_mapper(mapper, NULL); 7581 - return; 7582 - } 7583 - 7584 - ftrace_func_mapper_remove_ip(mapper, ip); 7585 - } 7586 - 7587 - static struct ftrace_probe_ops snapshot_probe_ops = { 7588 - .func = ftrace_snapshot, 7589 - .print = ftrace_snapshot_print, 7590 - }; 7591 - 7592 - static struct ftrace_probe_ops snapshot_count_probe_ops = { 7593 - .func = ftrace_count_snapshot, 7594 - .print = ftrace_snapshot_print, 7595 - .init = 
ftrace_snapshot_init, 7596 - .free = ftrace_snapshot_free, 7597 - }; 7598 - 7599 - static int 7600 - ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 7601 - char *glob, char *cmd, char *param, int enable) 7602 - { 7603 - struct ftrace_probe_ops *ops; 7604 - void *count = (void *)-1; 7605 - char *number; 7606 - int ret; 7607 - 7608 - if (!tr) 7609 - return -ENODEV; 7610 - 7611 - /* hash funcs only work with set_ftrace_filter */ 7612 - if (!enable) 7613 - return -EINVAL; 7614 - 7615 - ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 7616 - 7617 - if (glob[0] == '!') { 7618 - ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); 7619 - if (!ret) 7620 - tracing_disarm_snapshot(tr); 7621 - 7622 - return ret; 7623 - } 7624 - 7625 - if (!param) 7626 - goto out_reg; 7627 - 7628 - number = strsep(&param, ":"); 7629 - 7630 - if (!strlen(number)) 7631 - goto out_reg; 7632 - 7633 - /* 7634 - * We use the callback data field (which is a pointer) 7635 - * as our counter. 7636 - */ 7637 - ret = kstrtoul(number, 0, (unsigned long *)&count); 7638 - if (ret) 7639 - return ret; 7640 - 7641 - out_reg: 7642 - ret = tracing_arm_snapshot(tr); 7643 - if (ret < 0) 7644 - return ret; 7645 - 7646 - ret = register_ftrace_function_probe(glob, tr, ops, count); 7647 - if (ret < 0) 7648 - tracing_disarm_snapshot(tr); 7649 - 7650 - return ret < 0 ? ret : 0; 7651 - } 7652 - 7653 - static struct ftrace_func_command ftrace_snapshot_cmd = { 7654 - .name = "snapshot", 7655 - .func = ftrace_trace_snapshot_callback, 7656 - }; 7657 - 7658 - static __init int register_snapshot_cmd(void) 7659 - { 7660 - return register_ftrace_command(&ftrace_snapshot_cmd); 7661 - } 7662 - #else 7663 - static inline __init int register_snapshot_cmd(void) { return 0; } 7664 - #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 7665 8410 7666 8411 static struct dentry *tracing_get_dentry(struct trace_array *tr) 7667 8412 { ··· 8291 9364 memset(tscratch, 0, size); 8292 9365 } 8293 9366 8294 - static int 8295 - allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, unsigned long size) 9367 + int allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) 8296 9368 { 8297 9369 enum ring_buffer_flags rb_flags; 8298 9370 struct trace_scratch *tscratch; ··· 8330 9404 } 8331 9405 8332 9406 /* Allocate the first page for all buffers */ 8333 - set_buffer_entries(&tr->array_buffer, 8334 - ring_buffer_size(tr->array_buffer.buffer, 0)); 9407 + trace_set_buffer_entries(&tr->array_buffer, 9408 + ring_buffer_size(tr->array_buffer.buffer, 0)); 8335 9409 8336 9410 return 0; 8337 9411 } ··· 8354 9428 if (ret) 8355 9429 return ret; 8356 9430 8357 - #ifdef CONFIG_TRACER_SNAPSHOT 8358 - /* Fix mapped buffer trace arrays do not have snapshot buffers */ 8359 - if (tr->range_addr_start) 8360 - return 0; 8361 - 8362 - ret = allocate_trace_buffer(tr, &tr->snapshot_buffer, 8363 - allocate_snapshot ? 
size : 1); 8364 - if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { 9431 + ret = trace_allocate_snapshot(tr, size); 9432 + if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) 8365 9433 free_trace_buffer(&tr->array_buffer); 8366 - return -ENOMEM; 8367 - } 8368 - tr->allocated_snapshot = allocate_snapshot; 8369 9434 8370 - allocate_snapshot = false; 8371 - #endif 8372 - 8373 - return 0; 9435 + return ret; 8374 9436 } 8375 9437 8376 9438 static void free_trace_buffers(struct trace_array *tr) ··· 9466 10552 return done; 9467 10553 } 9468 10554 9469 - #ifdef CONFIG_TRACER_SNAPSHOT 9470 - __init static bool tr_needs_alloc_snapshot(const char *name) 9471 - { 9472 - char *test; 9473 - int len = strlen(name); 9474 - bool ret; 9475 - 9476 - if (!boot_snapshot_index) 9477 - return false; 9478 - 9479 - if (strncmp(name, boot_snapshot_info, len) == 0 && 9480 - boot_snapshot_info[len] == '\t') 9481 - return true; 9482 - 9483 - test = kmalloc(strlen(name) + 3, GFP_KERNEL); 9484 - if (!test) 9485 - return false; 9486 - 9487 - sprintf(test, "\t%s\t", name); 9488 - ret = strstr(boot_snapshot_info, test) == NULL; 9489 - kfree(test); 9490 - return ret; 9491 - } 9492 - 9493 - __init static void do_allocate_snapshot(const char *name) 9494 - { 9495 - if (!tr_needs_alloc_snapshot(name)) 9496 - return; 9497 - 9498 - /* 9499 - * When allocate_snapshot is set, the next call to 9500 - * allocate_trace_buffers() (called by trace_array_get_by_name()) 9501 - * will allocate the snapshot buffer. That will also clear 9502 - * this flag. 9503 - */ 9504 - allocate_snapshot = true; 9505 - } 9506 - #else 9507 - static inline void do_allocate_snapshot(const char *name) { } 9508 - #endif 9509 - 9510 10555 __init static int backup_instance_area(const char *backup, 9511 10556 unsigned long *addr, phys_addr_t *size) 9512 10557 { ··· 9615 10742 } 9616 10743 } else { 9617 10744 /* Only non mapped buffers have snapshot buffers */ 9618 - if (IS_ENABLED(CONFIG_TRACER_SNAPSHOT)) 9619 - do_allocate_snapshot(name); 10745 + do_allocate_snapshot(name); 9620 10746 } 9621 10747 9622 10748 tr = trace_array_create_systems(name, NULL, addr, size); ··· 9806 10934 return &global_trace; 9807 10935 } 9808 10936 #endif 9809 - 9810 - void __init ftrace_boot_snapshot(void) 9811 - { 9812 - #ifdef CONFIG_TRACER_SNAPSHOT 9813 - struct trace_array *tr; 9814 - 9815 - if (!snapshot_at_boot) 9816 - return; 9817 - 9818 - list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9819 - if (!tr->allocated_snapshot) 9820 - continue; 9821 - 9822 - tracing_snapshot_instance(tr); 9823 - trace_array_puts(tr, "** Boot snapshot taken **\n"); 9824 - } 9825 - #endif 9826 - } 9827 10937 9828 10938 void __init early_trace_init(void) 9829 10939 {
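Several of the relocated functions use the kernel's scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(...) in tracing_arm_snapshot() and __free(kfree)/no_free_ptr() in tracing_snapshot_cond_enable() above. A minimal sketch of the guard() idiom as it would appear in kernel code, with a hypothetical lock and operation:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);		/* hypothetical lock for illustration */

static int demo_locked_op(int arg)
{
	guard(mutex)(&demo_lock);	/* acquired here, released at scope exit */

	if (arg < 0)
		return -EINVAL;		/* early return cannot leak the lock */
	return 0;
}

Because the unlock is attached to the scope rather than to a label, functions written this way can be cut and pasted between files without re-auditing their error paths, which is exactly what this commit does.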
+97 -8
kernel/trace/trace.h
··· 264 264 265 265 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); 266 266 267 + #ifdef CONFIG_TRACER_SNAPSHOT 267 268 /** 268 269 * struct cond_snapshot - conditional snapshot data and callback 269 270 * ··· 307 306 void *cond_data; 308 307 cond_update_fn_t update; 309 308 }; 309 + #endif /* CONFIG_TRACER_SNAPSHOT */ 310 310 311 311 /* 312 312 * struct trace_func_repeats - used to keep track of the consecutive ··· 677 675 void tracing_reset_all_online_cpus_unlocked(void); 678 676 int tracing_open_generic(struct inode *inode, struct file *filp); 679 677 int tracing_open_generic_tr(struct inode *inode, struct file *filp); 678 + int tracing_release(struct inode *inode, struct file *file); 680 679 int tracing_release_generic_tr(struct inode *inode, struct file *file); 681 680 int tracing_open_file_tr(struct inode *inode, struct file *filp); 682 681 int tracing_release_file_tr(struct inode *inode, struct file *filp); ··· 687 684 void tracer_tracing_off(struct trace_array *tr); 688 685 void tracer_tracing_disable(struct trace_array *tr); 689 686 void tracer_tracing_enable(struct trace_array *tr); 687 + int allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size); 690 688 struct dentry *trace_create_file(const char *name, 691 689 umode_t mode, 692 690 struct dentry *parent, 693 691 void *data, 694 692 const struct file_operations *fops); 695 693 694 + struct trace_iterator *__tracing_open(struct inode *inode, struct file *file, 695 + bool snapshot); 696 + int tracing_buffers_open(struct inode *inode, struct file *filp); 697 + ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 698 + size_t count, loff_t *ppos); 699 + int tracing_buffers_release(struct inode *inode, struct file *file); 700 + ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 701 + struct pipe_inode_info *pipe, size_t len, unsigned int flags); 702 + 703 + ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 704 + size_t cnt, loff_t *ppos); 705 + ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 706 + size_t cnt, loff_t *ppos); 707 + 708 + void trace_set_buffer_entries(struct array_buffer *buf, unsigned long val); 709 + 710 + /* 711 + * Should be used after trace_array_get(), trace_types_lock 712 + * ensures that i_cdev was already initialized. 
713 + */ 714 + static inline int tracing_get_cpu(struct inode *inode) 715 + { 716 + if (inode->i_cdev) /* See trace_create_cpu_file() */ 717 + return (long)inode->i_cdev - 1; 718 + return RING_BUFFER_ALL_CPUS; 719 + } 720 + void tracing_reset_cpu(struct array_buffer *buf, int cpu); 721 + 722 + struct ftrace_buffer_info { 723 + struct trace_iterator iter; 724 + void *spare; 725 + unsigned int spare_cpu; 726 + unsigned int spare_size; 727 + unsigned int read; 728 + }; 696 729 697 730 /** 698 731 * tracer_tracing_is_on_cpu - show real state of ring buffer enabled on for a cpu ··· 867 828 { 868 829 return tracer->use_max_tr; 869 830 } 831 + void trace_create_maxlat_file(struct trace_array *tr, 832 + struct dentry *d_tracer); 870 833 #else 871 834 static inline bool tracer_uses_snapshot(struct tracer *tracer) 872 835 { 873 836 return false; 874 837 } 838 + static inline void trace_create_maxlat_file(struct trace_array *tr, 839 + struct dentry *d_tracer) { } 875 840 #endif 876 841 877 842 void trace_last_func_repeats(struct trace_array *tr, ··· 2183 2140 2184 2141 extern int trace_event_enable_disable(struct trace_event_file *file, 2185 2142 int enable, int soft_disable); 2186 - extern int tracing_alloc_snapshot(void); 2187 - extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); 2188 - extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); 2189 - 2190 - extern int tracing_snapshot_cond_disable(struct trace_array *tr); 2191 - extern void *tracing_cond_snapshot_data(struct trace_array *tr); 2192 2143 2193 2144 extern const char *__start___trace_bprintk_fmt[]; 2194 2145 extern const char *__stop___trace_bprintk_fmt[]; ··· 2270 2233 #endif 2271 2234 2272 2235 #ifdef CONFIG_TRACER_SNAPSHOT 2236 + extern const struct file_operations snapshot_fops; 2237 + extern const struct file_operations snapshot_raw_fops; 2238 + 2239 + /* Used when creating instances */ 2240 + int trace_allocate_snapshot(struct trace_array *tr, int size); 2241 + 2242 + int tracing_alloc_snapshot(void); 2243 + void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); 2244 + int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); 2245 + int tracing_snapshot_cond_disable(struct trace_array *tr); 2246 + void *tracing_cond_snapshot_data(struct trace_array *tr); 2273 2247 void tracing_snapshot_instance(struct trace_array *tr); 2274 2248 int tracing_alloc_snapshot_instance(struct trace_array *tr); 2249 + int tracing_arm_snapshot_locked(struct trace_array *tr); 2275 2250 int tracing_arm_snapshot(struct trace_array *tr); 2276 2251 void tracing_disarm_snapshot(struct trace_array *tr); 2277 - #else 2252 + void free_snapshot(struct trace_array *tr); 2253 + void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter); 2254 + int get_snapshot_map(struct trace_array *tr); 2255 + void put_snapshot_map(struct trace_array *tr); 2256 + int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 2257 + struct array_buffer *size_buf, int cpu_id); 2258 + __init void do_allocate_snapshot(const char *name); 2259 + # ifdef CONFIG_DYNAMIC_FTRACE 2260 + __init int register_snapshot_cmd(void); 2261 + # else 2262 + static inline int register_snapshot_cmd(void) { return 0; } 2263 + # endif 2264 + #else /* !CONFIG_TRACER_SNAPSHOT */ 2265 + static inline int trace_allocate_snapshot(struct trace_array *tr, int size) { return 0; } 2278 2266 static inline void tracing_snapshot_instance(struct trace_array *tr) { } 2279 
2267 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) 2280 2268 { 2281 2269 return 0; 2282 2270 } 2271 + static inline int tracing_arm_snapshot_locked(struct trace_array *tr) { return -EBUSY; } 2283 2272 static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; } 2284 2273 static inline void tracing_disarm_snapshot(struct trace_array *tr) { } 2285 - #endif 2274 + static inline void free_snapshot(struct trace_array *tr) {} 2275 + static inline void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 2276 + { 2277 + WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); 2278 + } 2279 + static inline void *tracing_cond_snapshot_data(struct trace_array *tr) 2280 + { 2281 + return NULL; 2282 + } 2283 + static inline int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) 2284 + { 2285 + return -ENODEV; 2286 + } 2287 + static inline int tracing_snapshot_cond_disable(struct trace_array *tr) 2288 + { 2289 + return false; 2290 + } 2291 + static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 2292 + { 2293 + /* Should never be called */ 2294 + WARN_ONCE(1, "Snapshot print function called without snapshot configured"); 2295 + } 2296 + static inline int get_snapshot_map(struct trace_array *tr) { return 0; } 2297 + static inline void put_snapshot_map(struct trace_array *tr) { } 2298 + static inline void do_allocate_snapshot(const char *name) { } 2299 + static inline int register_snapshot_cmd(void) { return 0; } 2300 + #endif /* CONFIG_TRACER_SNAPSHOT */ 2286 2301 2287 2302 #ifdef CONFIG_PREEMPT_TRACER 2288 2303 void tracer_preempt_on(unsigned long a0, unsigned long a1);
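One detail worth calling out in the header additions: tracing_get_cpu(), now a static inline in trace.h, decodes a CPU number that trace_create_cpu_file() stashed in the inode's otherwise unused i_cdev pointer with a +1 bias, so the NULL default maps to RING_BUFFER_ALL_CPUS. A standalone, hypothetical sketch of that encode/decode trick:

#include <stdio.h>

#define ALL_CPUS	(-1L)

/* Bias the stored value by +1 so the slot's natural default (NULL)
 * decodes to "all CPUs", the trick tracing_get_cpu() plays with i_cdev. */
static void *encode_cpu(long cpu)
{
	return (void *)(cpu + 1);
}

static long decode_cpu(void *slot)
{
	if (slot)
		return (long)slot - 1;
	return ALL_CPUS;
}

int main(void)
{
	void *slot = encode_cpu(3);

	printf("%ld %ld\n", decode_cpu(slot), decode_cpu(NULL));	/* 3 -1 */
	return 0;
}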
+1067
kernel/trace/trace_snapshot.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/fsnotify.h> 3 + 4 + #include <asm/setup.h> /* COMMAND_LINE_SIZE */ 5 + 6 + #include "trace.h" 7 + 8 + /* Used if snapshot allocated at boot */ 9 + static bool allocate_snapshot; 10 + static bool snapshot_at_boot; 11 + 12 + static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata; 13 + static int boot_snapshot_index; 14 + 15 + static int __init boot_alloc_snapshot(char *str) 16 + { 17 + char *slot = boot_snapshot_info + boot_snapshot_index; 18 + int left = sizeof(boot_snapshot_info) - boot_snapshot_index; 19 + int ret; 20 + 21 + if (str[0] == '=') { 22 + str++; 23 + if (strlen(str) >= left) 24 + return -1; 25 + 26 + ret = snprintf(slot, left, "%s\t", str); 27 + boot_snapshot_index += ret; 28 + } else { 29 + allocate_snapshot = true; 30 + /* We also need the main ring buffer expanded */ 31 + trace_set_ring_buffer_expanded(NULL); 32 + } 33 + return 1; 34 + } 35 + __setup("alloc_snapshot", boot_alloc_snapshot); 36 + 37 + 38 + static int __init boot_snapshot(char *str) 39 + { 40 + snapshot_at_boot = true; 41 + boot_alloc_snapshot(str); 42 + return 1; 43 + } 44 + __setup("ftrace_boot_snapshot", boot_snapshot); 45 + static void tracing_snapshot_instance_cond(struct trace_array *tr, 46 + void *cond_data) 47 + { 48 + unsigned long flags; 49 + 50 + if (in_nmi()) { 51 + trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); 52 + trace_array_puts(tr, "*** snapshot is being ignored ***\n"); 53 + return; 54 + } 55 + 56 + if (!tr->allocated_snapshot) { 57 + trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); 58 + trace_array_puts(tr, "*** stopping trace here! ***\n"); 59 + tracer_tracing_off(tr); 60 + return; 61 + } 62 + 63 + if (tr->mapped) { 64 + trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n"); 65 + trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); 66 + return; 67 + } 68 + 69 + /* Note, snapshot can not be used when the tracer uses it */ 70 + if (tracer_uses_snapshot(tr->current_trace)) { 71 + trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); 72 + trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); 73 + return; 74 + } 75 + 76 + local_irq_save(flags); 77 + update_max_tr(tr, current, smp_processor_id(), cond_data); 78 + local_irq_restore(flags); 79 + } 80 + 81 + void tracing_snapshot_instance(struct trace_array *tr) 82 + { 83 + tracing_snapshot_instance_cond(tr, NULL); 84 + } 85 + 86 + /** 87 + * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 88 + * @tr: The tracing instance to snapshot 89 + * @cond_data: The data to be tested conditionally, and possibly saved 90 + * 91 + * This is the same as tracing_snapshot() except that the snapshot is 92 + * conditional - the snapshot will only happen if the 93 + * cond_snapshot.update() implementation receiving the cond_data 94 + * returns true, which means that the trace array's cond_snapshot 95 + * update() operation used the cond_data to determine whether the 96 + * snapshot should be taken, and if it was, presumably saved it along 97 + * with the snapshot. 
98 + */ 99 + void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 100 + { 101 + tracing_snapshot_instance_cond(tr, cond_data); 102 + } 103 + EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 104 + 105 + /** 106 + * tracing_cond_snapshot_data - get the user data associated with a snapshot 107 + * @tr: The tracing instance 108 + * 109 + * When the user enables a conditional snapshot using 110 + * tracing_snapshot_cond_enable(), the user-defined cond_data is saved 111 + * with the snapshot. This accessor is used to retrieve it. 112 + * 113 + * Should not be called from cond_snapshot.update(), since it takes 114 + * the tr->max_lock lock, which the code calling 115 + * cond_snapshot.update() has already done. 116 + * 117 + * Returns the cond_data associated with the trace array's snapshot. 118 + */ 119 + void *tracing_cond_snapshot_data(struct trace_array *tr) 120 + { 121 + void *cond_data = NULL; 122 + 123 + local_irq_disable(); 124 + arch_spin_lock(&tr->max_lock); 125 + 126 + if (tr->cond_snapshot) 127 + cond_data = tr->cond_snapshot->cond_data; 128 + 129 + arch_spin_unlock(&tr->max_lock); 130 + local_irq_enable(); 131 + 132 + return cond_data; 133 + } 134 + EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 135 + 136 + /* resize @tr's buffer to the size of @size_tr's entries */ 137 + int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 138 + struct array_buffer *size_buf, int cpu_id) 139 + { 140 + int cpu, ret = 0; 141 + 142 + if (cpu_id == RING_BUFFER_ALL_CPUS) { 143 + for_each_tracing_cpu(cpu) { 144 + ret = ring_buffer_resize(trace_buf->buffer, 145 + per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 146 + if (ret < 0) 147 + break; 148 + per_cpu_ptr(trace_buf->data, cpu)->entries = 149 + per_cpu_ptr(size_buf->data, cpu)->entries; 150 + } 151 + } else { 152 + ret = ring_buffer_resize(trace_buf->buffer, 153 + per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 154 + if (ret == 0) 155 + per_cpu_ptr(trace_buf->data, cpu_id)->entries = 156 + per_cpu_ptr(size_buf->data, cpu_id)->entries; 157 + } 158 + 159 + return ret; 160 + } 161 + 162 + int tracing_alloc_snapshot_instance(struct trace_array *tr) 163 + { 164 + int order; 165 + int ret; 166 + 167 + if (!tr->allocated_snapshot) { 168 + 169 + /* Make the snapshot buffer have the same order as main buffer */ 170 + order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); 171 + ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order); 172 + if (ret < 0) 173 + return ret; 174 + 175 + /* allocate spare buffer */ 176 + ret = resize_buffer_duplicate_size(&tr->snapshot_buffer, 177 + &tr->array_buffer, RING_BUFFER_ALL_CPUS); 178 + if (ret < 0) 179 + return ret; 180 + 181 + tr->allocated_snapshot = true; 182 + } 183 + 184 + return 0; 185 + } 186 + 187 + void free_snapshot(struct trace_array *tr) 188 + { 189 + /* 190 + * We don't free the ring buffer. instead, resize it because 191 + * The max_tr ring buffer has some state (e.g. ring->clock) and 192 + * we want preserve it. 
135 +
136 + /* resize @trace_buf's buffer to the size of @size_buf's entries */
137 + int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
138 +                                  struct array_buffer *size_buf, int cpu_id)
139 + {
140 +        int cpu, ret = 0;
141 +
142 +        if (cpu_id == RING_BUFFER_ALL_CPUS) {
143 +                for_each_tracing_cpu(cpu) {
144 +                        ret = ring_buffer_resize(trace_buf->buffer,
145 +                                 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
146 +                        if (ret < 0)
147 +                                break;
148 +                        per_cpu_ptr(trace_buf->data, cpu)->entries =
149 +                                per_cpu_ptr(size_buf->data, cpu)->entries;
150 +                }
151 +        } else {
152 +                ret = ring_buffer_resize(trace_buf->buffer,
153 +                         per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
154 +                if (ret == 0)
155 +                        per_cpu_ptr(trace_buf->data, cpu_id)->entries =
156 +                                per_cpu_ptr(size_buf->data, cpu_id)->entries;
157 +        }
158 +
159 +        return ret;
160 + }
161 +
162 + int tracing_alloc_snapshot_instance(struct trace_array *tr)
163 + {
164 +        int order;
165 +        int ret;
166 +
167 +        if (!tr->allocated_snapshot) {
168 +
169 +                /* Make the snapshot buffer have the same order as main buffer */
170 +                order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
171 +                ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
172 +                if (ret < 0)
173 +                        return ret;
174 +
175 +                /* allocate spare buffer */
176 +                ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
177 +                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
178 +                if (ret < 0)
179 +                        return ret;
180 +
181 +                tr->allocated_snapshot = true;
182 +        }
183 +
184 +        return 0;
185 + }
186 +
187 + void free_snapshot(struct trace_array *tr)
188 + {
189 +        /*
190 +         * We don't free the ring buffer; instead, we resize it, because
191 +         * the snapshot ring buffer has state (e.g. ring->clock) that
192 +         * we want to preserve.
193 +         */
194 +        ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, 0);
195 +        ring_buffer_resize(tr->snapshot_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
196 +        trace_set_buffer_entries(&tr->snapshot_buffer, 1);
197 +        tracing_reset_online_cpus(&tr->snapshot_buffer);
198 +        tr->allocated_snapshot = false;
199 + }
200 +
201 + int tracing_arm_snapshot_locked(struct trace_array *tr)
202 + {
203 +        int ret;
204 +
205 +        lockdep_assert_held(&trace_types_lock);
206 +
207 +        spin_lock(&tr->snapshot_trigger_lock);
208 +        if (tr->snapshot == UINT_MAX || tr->mapped) {
209 +                spin_unlock(&tr->snapshot_trigger_lock);
210 +                return -EBUSY;
211 +        }
212 +
213 +        tr->snapshot++;
214 +        spin_unlock(&tr->snapshot_trigger_lock);
215 +
216 +        ret = tracing_alloc_snapshot_instance(tr);
217 +        if (ret) {
218 +                spin_lock(&tr->snapshot_trigger_lock);
219 +                tr->snapshot--;
220 +                spin_unlock(&tr->snapshot_trigger_lock);
221 +        }
222 +
223 +        return ret;
224 + }
225 +
226 + int tracing_arm_snapshot(struct trace_array *tr)
227 + {
228 +        guard(mutex)(&trace_types_lock);
229 +        return tracing_arm_snapshot_locked(tr);
230 + }
231 +
232 + void tracing_disarm_snapshot(struct trace_array *tr)
233 + {
234 +        spin_lock(&tr->snapshot_trigger_lock);
235 +        if (!WARN_ON(!tr->snapshot))
236 +                tr->snapshot--;
237 +        spin_unlock(&tr->snapshot_trigger_lock);
238 + }
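tracing_arm_snapshot() and tracing_disarm_snapshot() are meant to bracket any feature that may later swap buffers. A sketch of the expected pairing, with my_register_trigger() standing in for a real consumer (the same shape appears in ftrace_trace_snapshot_callback() at the end of this file):

static int my_register_trigger(struct trace_array *tr);        /* hypothetical */

static int my_enable(struct trace_array *tr)
{
        int ret;

        ret = tracing_arm_snapshot(tr);         /* allocates and pins the buffer */
        if (ret < 0)
                return ret;

        ret = my_register_trigger(tr);          /* hypothetical consumer */
        if (ret < 0)
                tracing_disarm_snapshot(tr);    /* must undo the arm on failure */

        return ret;
}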
239 +
240 + /**
241 +  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
242 +  *
243 +  * This is similar to tracing_snapshot(), but it will allocate the
244 +  * snapshot buffer if it isn't already allocated. Use this only
245 +  * where it is safe to sleep, as the allocation may sleep.
246 +  *
247 +  * This causes a swap between the snapshot buffer and the current live
248 +  * tracing buffer. You can use this to take snapshots of the live
249 +  * trace when some condition is triggered, but continue to trace.
250 +  */
251 + void tracing_snapshot_alloc(void)
252 + {
253 +        int ret;
254 +
255 +        ret = tracing_alloc_snapshot();
256 +        if (ret < 0)
257 +                return;
258 +
259 +        tracing_snapshot();
260 + }
261 + EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
262 +
263 + /**
264 +  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
265 +  * @tr: The tracing instance
266 +  * @cond_data: User data to associate with the snapshot
267 +  * @update: Implementation of the cond_snapshot update function
268 +  *
269 +  * Check whether the conditional snapshot for the given instance has
270 +  * already been enabled, or if the current tracer is already using a
271 +  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
272 +  * save the cond_data and update function inside.
273 +  *
274 +  * Returns 0 if successful, error otherwise.
275 +  */
276 + int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
277 +                                  cond_update_fn_t update)
278 + {
279 +        struct cond_snapshot *cond_snapshot __free(kfree) =
280 +                kzalloc_obj(*cond_snapshot);
281 +        int ret;
282 +
283 +        if (!cond_snapshot)
284 +                return -ENOMEM;
285 +
286 +        cond_snapshot->cond_data = cond_data;
287 +        cond_snapshot->update = update;
288 +
289 +        guard(mutex)(&trace_types_lock);
290 +
291 +        if (tracer_uses_snapshot(tr->current_trace))
292 +                return -EBUSY;
293 +
294 +        /*
295 +         * The cond_snapshot can only change to NULL without the
296 +         * trace_types_lock. We don't care if we race with it going
297 +         * to NULL, but we want to make sure that it's not set to
298 +         * something other than NULL when we get here, which we can
299 +         * do safely with only holding the trace_types_lock and not
300 +         * having to take the max_lock.
301 +         */
302 +        if (tr->cond_snapshot)
303 +                return -EBUSY;
304 +
305 +        ret = tracing_arm_snapshot_locked(tr);
306 +        if (ret)
307 +                return ret;
308 +
309 +        local_irq_disable();
310 +        arch_spin_lock(&tr->max_lock);
311 +        tr->cond_snapshot = no_free_ptr(cond_snapshot);
312 +        arch_spin_unlock(&tr->max_lock);
313 +        local_irq_enable();
314 +
315 +        return 0;
316 + }
317 + EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
318 +
319 + /**
320 +  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
321 +  * @tr: The tracing instance
322 +  *
323 +  * Check whether the conditional snapshot for the given instance is
324 +  * enabled; if so, free the cond_snapshot associated with it,
325 +  * otherwise return -EINVAL.
326 +  *
327 +  * Returns 0 if successful, error otherwise.
328 +  */
329 + int tracing_snapshot_cond_disable(struct trace_array *tr)
330 + {
331 +        int ret = 0;
332 +
333 +        local_irq_disable();
334 +        arch_spin_lock(&tr->max_lock);
335 +
336 +        if (!tr->cond_snapshot)
337 +                ret = -EINVAL;
338 +        else {
339 +                kfree(tr->cond_snapshot);
340 +                tr->cond_snapshot = NULL;
341 +        }
342 +
343 +        arch_spin_unlock(&tr->max_lock);
344 +        local_irq_enable();
345 +
346 +        tracing_disarm_snapshot(tr);
347 +
348 +        return ret;
349 + }
350 + EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
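Completing the conditional-snapshot sketch started above: teardown is a single call, which frees the cond_snapshot and, via tracing_disarm_snapshot(), drops the arm count taken by tracing_snapshot_cond_enable(). The cond_data itself stays owned by the caller:

static void my_disarm(struct trace_array *tr, struct my_cond *c)
{
        int ret;

        /* frees tr->cond_snapshot and drops the arm count from enable */
        ret = tracing_snapshot_cond_disable(tr);
        WARN_ON(ret);   /* -EINVAL would mean we never armed it */

        kfree(c);       /* cond_data is not freed by the tracing core */
}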
351 +
352 + #ifdef CONFIG_TRACER_MAX_TRACE
353 + #ifdef LATENCY_FS_NOTIFY
354 + static struct workqueue_struct *fsnotify_wq;
355 +
356 + static void latency_fsnotify_workfn(struct work_struct *work)
357 + {
358 +        struct trace_array *tr = container_of(work, struct trace_array,
359 +                                              fsnotify_work);
360 +        fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
361 + }
362 +
363 + static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
364 + {
365 +        struct trace_array *tr = container_of(iwork, struct trace_array,
366 +                                              fsnotify_irqwork);
367 +        queue_work(fsnotify_wq, &tr->fsnotify_work);
368 + }
369 +
370 + __init static int latency_fsnotify_init(void)
371 + {
372 +        fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
373 +                                      WQ_UNBOUND | WQ_HIGHPRI, 0);
374 +        if (!fsnotify_wq) {
375 +                pr_err("Unable to allocate tr_max_lat_wq\n");
376 +                return -ENOMEM;
377 +        }
378 +        return 0;
379 + }
380 +
381 + late_initcall_sync(latency_fsnotify_init);
382 +
383 + void latency_fsnotify(struct trace_array *tr)
384 + {
385 +        if (!fsnotify_wq)
386 +                return;
387 +        /*
388 +         * We cannot call queue_work(&tr->fsnotify_work) from here because it's
389 +         * possible that we are called from __schedule() or do_idle(), which
390 +         * could cause a deadlock.
391 +         */
392 +        irq_work_queue(&tr->fsnotify_irqwork);
393 + }
394 + #else
395 + static inline void latency_fsnotify(struct trace_array *tr) { }
396 + #endif /* LATENCY_FS_NOTIFY */
397 + static const struct file_operations tracing_max_lat_fops;
398 +
399 + void trace_create_maxlat_file(struct trace_array *tr,
400 +                               struct dentry *d_tracer)
401 + {
402 + #ifdef LATENCY_FS_NOTIFY
403 +        INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
404 +        init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
405 + #endif
406 +        tr->d_max_latency = trace_create_file("tracing_max_latency",
407 +                                              TRACE_MODE_WRITE,
408 +                                              d_tracer, tr,
409 +                                              &tracing_max_lat_fops);
410 + }
411 +
412 + /*
413 +  * Copy the new maximum trace into the separate maximum-trace
414 +  * structure. (This way the maximum trace is permanently saved,
415 +  * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
416 +  */
417 + static void
418 + __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
419 + {
420 +        struct array_buffer *trace_buf = &tr->array_buffer;
421 +        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
422 +        struct array_buffer *max_buf = &tr->snapshot_buffer;
423 +        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
424 +
425 +        max_buf->cpu = cpu;
426 +        max_buf->time_start = data->preempt_timestamp;
427 +
428 +        max_data->saved_latency = tr->max_latency;
429 +        max_data->critical_start = data->critical_start;
430 +        max_data->critical_end = data->critical_end;
431 +
432 +        strscpy(max_data->comm, tsk->comm);
433 +        max_data->pid = tsk->pid;
434 +        /*
435 +         * If tsk == current, then use current_uid(), as that does not use
436 +         * RCU. The irq tracer can be called out of RCU scope.
437 +         */
438 +        if (tsk == current)
439 +                max_data->uid = current_uid();
440 +        else
441 +                max_data->uid = task_uid(tsk);
442 +
443 +        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
444 +        max_data->policy = tsk->policy;
445 +        max_data->rt_priority = tsk->rt_priority;
446 +
447 +        /* record this task's comm */
448 +        tracing_record_cmdline(tsk);
449 +        latency_fsnotify(tr);
450 + }
451 + #else
452 + static inline void __update_max_tr(struct trace_array *tr,
453 +                                    struct task_struct *tsk, int cpu) { }
454 + #endif /* CONFIG_TRACER_MAX_TRACE */
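The FS_MODIFY event emitted by latency_fsnotify_workfn() exists so user space can block on latency updates instead of polling the file. A user-space sketch (error handling elided; the usual /sys/kernel/tracing mount point is assumed):

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
        char buf[4096];
        int fd = inotify_init();

        inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
                          IN_MODIFY);

        /* read() returns only when a new max latency has been recorded */
        while (read(fd, buf, sizeof(buf)) > 0)
                printf("tracing_max_latency updated\n");

        return 0;
}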
455 +
456 + /**
457 +  * update_max_tr - snapshot the trace buffers of @tr into its snapshot buffer
458 +  * @tr: tracer
459 +  * @tsk: the task with the latency
460 +  * @cpu: The cpu that initiated the trace.
461 +  * @cond_data: User data associated with a conditional snapshot
462 +  *
463 +  * Flip the buffers between @tr's live buffer and its snapshot buffer,
464 +  * and record information about which task was the cause of this latency.
465 +  */
466 + void
467 + update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
468 +               void *cond_data)
469 + {
470 +        if (tr->stop_count)
471 +                return;
472 +
473 +        WARN_ON_ONCE(!irqs_disabled());
474 +
475 +        if (!tr->allocated_snapshot) {
476 +                /* Only the nop tracer should hit this when disabling */
477 +                WARN_ON_ONCE(tr->current_trace != &nop_trace);
478 +                return;
479 +        }
480 +
481 +        arch_spin_lock(&tr->max_lock);
482 +
483 +        /* Inherit the recordable setting from array_buffer */
484 +        if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
485 +                ring_buffer_record_on(tr->snapshot_buffer.buffer);
486 +        else
487 +                ring_buffer_record_off(tr->snapshot_buffer.buffer);
488 +
489 +        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
490 +                arch_spin_unlock(&tr->max_lock);
491 +                return;
492 +        }
493 +
494 +        swap(tr->array_buffer.buffer, tr->snapshot_buffer.buffer);
495 +
496 +        __update_max_tr(tr, tsk, cpu);
497 +
498 +        arch_spin_unlock(&tr->max_lock);
499 +
500 +        /* Any waiters on the old snapshot buffer need to wake up */
501 +        ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
502 + }
503 +
504 + /**
505 +  * update_max_tr_single - only copy one trace over, and reset the rest
506 +  * @tr: tracer
507 +  * @tsk: task with the latency
508 +  * @cpu: the cpu of the buffer to copy.
509 +  *
510 +  * Flip the trace of a single CPU buffer between @tr and its snapshot buffer.
511 +  */
512 + void
513 + update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
514 + {
515 +        int ret;
516 +
517 +        if (tr->stop_count)
518 +                return;
519 +
520 +        WARN_ON_ONCE(!irqs_disabled());
521 +        if (!tr->allocated_snapshot) {
522 +                /* Only the nop tracer should hit this when disabling */
523 +                WARN_ON_ONCE(tr->current_trace != &nop_trace);
524 +                return;
525 +        }
526 +
527 +        arch_spin_lock(&tr->max_lock);
528 +
529 +        ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu);
530 +
531 +        if (ret == -EBUSY) {
532 +                /*
533 +                 * We failed to swap the buffer, either because a commit was
534 +                 * taking place on this CPU or because a resize is in
535 +                 * progress. We fail to record, but we reset the snapshot
536 +                 * buffer (no one writes directly to it) and flag that it
537 +                 * failed.
538 +                 */
539 +                trace_array_printk_buf(tr->snapshot_buffer.buffer, _THIS_IP_,
540 +                                       "Failed to swap buffers due to commit or resize in progress\n");
541 +        }
542 +
543 +        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
544 +
545 +        __update_max_tr(tr, tsk, cpu);
546 +        arch_spin_unlock(&tr->max_lock);
547 + }
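Both update functions require interrupts disabled on entry (they WARN otherwise). A sketch of how a latency-style caller typically wraps update_max_tr(); my_latency_hit() is hypothetical and assumes kernel/trace-internal access to struct trace_array:

static void my_latency_hit(struct trace_array *tr, u64 latency)
{
        unsigned long flags;

        if (latency <= tr->max_latency)
                return;

        tr->max_latency = latency;

        /* update_max_tr() checks irqs_disabled(), so disable around it */
        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), NULL);
        local_irq_restore(flags);
}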
548 +
549 + static void show_snapshot_main_help(struct seq_file *m)
550 + {
551 +        seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
552 +                    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
553 +                    "#                     Takes a snapshot of the main buffer.\n"
554 +                    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
555 +                    "#                     (Doesn't have to be '2'; works with any number that\n"
556 +                    "#                      is not a '0' or '1')\n");
557 + }
558 +
559 + static void show_snapshot_percpu_help(struct seq_file *m)
560 + {
561 +        seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
562 + #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
563 +        seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
564 +                    "#                     Takes a snapshot of the main buffer for this cpu.\n");
565 + #else
566 +        seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
567 +                    "#                     Must use main snapshot file to allocate.\n");
568 + #endif
569 +        seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
570 +                    "#                     (Doesn't have to be '2'; works with any number that\n"
571 +                    "#                      is not a '0' or '1')\n");
572 + }
573 +
574 + void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
575 + {
576 +        if (iter->tr->allocated_snapshot)
577 +                seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
578 +        else
579 +                seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
580 +
581 +        seq_puts(m, "# Snapshot commands:\n");
582 +        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
583 +                show_snapshot_main_help(m);
584 +        else
585 +                show_snapshot_percpu_help(m);
586 + }
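The help text above corresponds one-to-one with writes to the snapshot file. A user-space sketch of the 0/1/2 protocol (error handling elided):

#include <fcntl.h>
#include <unistd.h>

static void snapshot_cmd(char c)
{
        int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

        write(fd, &c, 1);
        close(fd);
}

/*
 * snapshot_cmd('1');  allocate if needed, then swap in a snapshot
 * snapshot_cmd('2');  clear the snapshot buffer, keep the allocation
 * snapshot_cmd('0');  free the snapshot buffer entirely
 */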
587 +
588 + static int tracing_snapshot_open(struct inode *inode, struct file *file)
589 + {
590 +        struct trace_array *tr = inode->i_private;
591 +        struct trace_iterator *iter;
592 +        struct seq_file *m;
593 +        int ret;
594 +
595 +        ret = tracing_check_open_get_tr(tr);
596 +        if (ret)
597 +                return ret;
598 +
599 +        if (file->f_mode & FMODE_READ) {
600 +                iter = __tracing_open(inode, file, true);
601 +                if (IS_ERR(iter))
602 +                        ret = PTR_ERR(iter);
603 +        } else {
604 +                /* Writes still need the seq_file to hold the private data */
605 +                ret = -ENOMEM;
606 +                m = kzalloc_obj(*m);
607 +                if (!m)
608 +                        goto out;
609 +                iter = kzalloc_obj(*iter);
610 +                if (!iter) {
611 +                        kfree(m);
612 +                        goto out;
613 +                }
614 +                ret = 0;
615 +
616 +                iter->tr = tr;
617 +                iter->array_buffer = &tr->snapshot_buffer;
618 +                iter->cpu_file = tracing_get_cpu(inode);
619 +                m->private = iter;
620 +                file->private_data = m;
621 +        }
622 + out:
623 +        if (ret < 0)
624 +                trace_array_put(tr);
625 +
626 +        return ret;
627 + }
628 +
629 + static void tracing_swap_cpu_buffer(void *tr)
630 + {
631 +        update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
632 + }
633 +
634 + static ssize_t
635 + tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
636 +                        loff_t *ppos)
637 + {
638 +        struct seq_file *m = filp->private_data;
639 +        struct trace_iterator *iter = m->private;
640 +        struct trace_array *tr = iter->tr;
641 +        unsigned long val;
642 +        int ret;
643 +
644 +        ret = tracing_update_buffers(tr);
645 +        if (ret < 0)
646 +                return ret;
647 +
648 +        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
649 +        if (ret)
650 +                return ret;
651 +
652 +        guard(mutex)(&trace_types_lock);
653 +
654 +        if (tracer_uses_snapshot(tr->current_trace))
655 +                return -EBUSY;
656 +
657 +        local_irq_disable();
658 +        arch_spin_lock(&tr->max_lock);
659 +        if (tr->cond_snapshot)
660 +                ret = -EBUSY;
661 +        arch_spin_unlock(&tr->max_lock);
662 +        local_irq_enable();
663 +        if (ret)
664 +                return ret;
665 +
666 +        switch (val) {
667 +        case 0:
668 +                if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
669 +                        return -EINVAL;
670 +                if (tr->allocated_snapshot)
671 +                        free_snapshot(tr);
672 +                break;
673 +        case 1:
674 + /* Only allow per-cpu swap if the ring buffer supports it */
675 + #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
676 +                if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
677 +                        return -EINVAL;
678 + #endif
679 +                if (tr->allocated_snapshot)
680 +                        ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
681 +                                        &tr->array_buffer, iter->cpu_file);
682 +
683 +                ret = tracing_arm_snapshot_locked(tr);
684 +                if (ret)
685 +                        return ret;
686 +
687 +                /* Now, we're going to swap */
688 +                if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
689 +                        local_irq_disable();
690 +                        update_max_tr(tr, current, smp_processor_id(), NULL);
691 +                        local_irq_enable();
692 +                } else {
693 +                        smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
694 +                                                 (void *)tr, 1);
695 +                }
696 +                tracing_disarm_snapshot(tr);
697 +                break;
698 +        default:
699 +                if (tr->allocated_snapshot) {
700 +                        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
701 +                                tracing_reset_online_cpus(&tr->snapshot_buffer);
702 +                        else
703 +                                tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file);
704 +                }
705 +                break;
706 +        }
707 +
708 +        if (ret >= 0) {
709 +                *ppos += cnt;
710 +                ret = cnt;
711 +        }
712 +
713 +        return ret;
714 + }
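Once tracing_snapshot_write() has swapped buffers, the frozen trace is read back through the same file with the usual trace formatting; a small reader sketch:

#include <stdio.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/sys/kernel/tracing/snapshot", "r");

        if (!f)
                return 1;

        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}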
715 +
716 + static int tracing_snapshot_release(struct inode *inode, struct file *file)
717 + {
718 +        struct seq_file *m = file->private_data;
719 +        int ret;
720 +
721 +        ret = tracing_release(inode, file);
722 +
723 +        if (file->f_mode & FMODE_READ)
724 +                return ret;
725 +
726 +        /* If write only, the seq_file is just a stub */
727 +        if (m)
728 +                kfree(m->private);
729 +        kfree(m);
730 +
731 +        return 0;
732 + }
733 +
734 + static int snapshot_raw_open(struct inode *inode, struct file *filp)
735 + {
736 +        struct ftrace_buffer_info *info;
737 +        int ret;
738 +
739 +        /* The following checks for tracefs lockdown */
740 +        ret = tracing_buffers_open(inode, filp);
741 +        if (ret < 0)
742 +                return ret;
743 +
744 +        info = filp->private_data;
745 +
746 +        if (tracer_uses_snapshot(info->iter.trace)) {
747 +                tracing_buffers_release(inode, filp);
748 +                return -EBUSY;
749 +        }
750 +
751 +        info->iter.snapshot = true;
752 +        info->iter.array_buffer = &info->iter.tr->snapshot_buffer;
753 +
754 +        return ret;
755 + }
756 +
757 + const struct file_operations snapshot_fops = {
758 +        .open           = tracing_snapshot_open,
759 +        .read           = seq_read,
760 +        .write          = tracing_snapshot_write,
761 +        .llseek         = tracing_lseek,
762 +        .release        = tracing_snapshot_release,
763 + };
764 +
765 + const struct file_operations snapshot_raw_fops = {
766 +        .open           = snapshot_raw_open,
767 +        .read           = tracing_buffers_read,
768 +        .release        = tracing_buffers_release,
769 +        .splice_read    = tracing_buffers_splice_read,
770 + };
771 +
772 + #ifdef CONFIG_TRACER_MAX_TRACE
773 + static ssize_t
774 + tracing_max_lat_read(struct file *filp, char __user *ubuf,
775 +                      size_t cnt, loff_t *ppos)
776 + {
777 +        struct trace_array *tr = filp->private_data;
778 +
779 +        return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
780 + }
781 +
782 + static ssize_t
783 + tracing_max_lat_write(struct file *filp, const char __user *ubuf,
784 +                       size_t cnt, loff_t *ppos)
785 + {
786 +        struct trace_array *tr = filp->private_data;
787 +
788 +        return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
789 + }
790 +
791 + static const struct file_operations tracing_max_lat_fops = {
792 +        .open           = tracing_open_generic_tr,
793 +        .read           = tracing_max_lat_read,
794 +        .write          = tracing_max_lat_write,
795 +        .llseek         = generic_file_llseek,
796 +        .release        = tracing_release_generic_tr,
797 + };
798 + #endif /* CONFIG_TRACER_MAX_TRACE */
799 +
800 + int get_snapshot_map(struct trace_array *tr)
801 + {
802 +        int err = 0;
803 +
804 +        /*
805 +         * Called with mmap_lock held. lockdep would be unhappy if we would now
806 +         * take trace_types_lock. Instead use the specific
807 +         * snapshot_trigger_lock.
808 +         */
809 +        spin_lock(&tr->snapshot_trigger_lock);
810 +
811 +        if (tr->snapshot || tr->mapped == UINT_MAX)
812 +                err = -EBUSY;
813 +        else
814 +                tr->mapped++;
815 +
816 +        spin_unlock(&tr->snapshot_trigger_lock);
817 +
818 +        /* Wait for update_max_tr() to observe iter->tr->mapped */
819 +        if (tr->mapped == 1)
820 +                synchronize_rcu();
821 +
822 +        return err;
823 +
824 + }
825 +
826 + void put_snapshot_map(struct trace_array *tr)
827 + {
828 +        spin_lock(&tr->snapshot_trigger_lock);
829 +        if (!WARN_ON(!tr->mapped))
830 +                tr->mapped--;
831 +        spin_unlock(&tr->snapshot_trigger_lock);
832 + }
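get_snapshot_map()/put_snapshot_map() implement a small either-or counting scheme: any number of mappers or any number of snapshot users, but never both at once. The same idiom in isolation (names hypothetical):

#include <linux/spinlock.h>

struct either_or {
        spinlock_t lock;
        unsigned int snapshots;         /* plays the role of tr->snapshot */
        unsigned int mappings;          /* plays the role of tr->mapped   */
};

static int get_mapping(struct either_or *e)
{
        int err = 0;

        spin_lock(&e->lock);
        if (e->snapshots || e->mappings == UINT_MAX)
                err = -EBUSY;   /* other side active, or counter saturated */
        else
                e->mappings++;
        spin_unlock(&e->lock);

        return err;
}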
833 +
834 + #ifdef CONFIG_DYNAMIC_FTRACE
835 + static void
836 + ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
837 +                 struct trace_array *tr, struct ftrace_probe_ops *ops,
838 +                 void *data)
839 + {
840 +        tracing_snapshot_instance(tr);
841 + }
842 +
843 + static void
844 + ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
845 +                       struct trace_array *tr, struct ftrace_probe_ops *ops,
846 +                       void *data)
847 + {
848 +        struct ftrace_func_mapper *mapper = data;
849 +        long *count = NULL;
850 +
851 +        if (mapper)
852 +                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
853 +
854 +        if (count) {
855 +
856 +                if (*count <= 0)
857 +                        return;
858 +
859 +                (*count)--;
860 +        }
861 +
862 +        tracing_snapshot_instance(tr);
863 + }
864 +
865 + static int
866 + ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
867 +                       struct ftrace_probe_ops *ops, void *data)
868 + {
869 +        struct ftrace_func_mapper *mapper = data;
870 +        long *count = NULL;
871 +
872 +        seq_printf(m, "%ps:", (void *)ip);
873 +
874 +        seq_puts(m, "snapshot");
875 +
876 +        if (mapper)
877 +                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
878 +
879 +        if (count)
880 +                seq_printf(m, ":count=%ld\n", *count);
881 +        else
882 +                seq_puts(m, ":unlimited\n");
883 +
884 +        return 0;
885 + }
886 +
887 + static int
888 + ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
889 +                      unsigned long ip, void *init_data, void **data)
890 + {
891 +        struct ftrace_func_mapper *mapper = *data;
892 +
893 +        if (!mapper) {
894 +                mapper = allocate_ftrace_func_mapper();
895 +                if (!mapper)
896 +                        return -ENOMEM;
897 +                *data = mapper;
898 +        }
899 +
900 +        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
901 + }
902 +
903 + static void
904 + ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
905 +                      unsigned long ip, void *data)
906 + {
907 +        struct ftrace_func_mapper *mapper = data;
908 +
909 +        if (!ip) {
910 +                if (!mapper)
911 +                        return;
912 +                free_ftrace_func_mapper(mapper, NULL);
913 +                return;
914 +        }
915 +
916 +        ftrace_func_mapper_remove_ip(mapper, ip);
917 + }
918 +
919 + static struct ftrace_probe_ops snapshot_probe_ops = {
920 +        .func                   = ftrace_snapshot,
921 +        .print                  = ftrace_snapshot_print,
922 + };
923 +
924 + static struct ftrace_probe_ops snapshot_count_probe_ops = {
925 +        .func                   = ftrace_count_snapshot,
926 +        .print                  = ftrace_snapshot_print,
927 +        .init                   = ftrace_snapshot_init,
928 +        .free                   = ftrace_snapshot_free,
929 + };
930 +
931 + static int
932 + ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
933 +                                char *glob, char *cmd, char *param, int enable)
934 + {
935 +        struct ftrace_probe_ops *ops;
936 +        void *count = (void *)-1;
937 +        char *number;
938 +        int ret;
939 +
940 +        if (!tr)
941 +                return -ENODEV;
942 +
943 +        /* hash funcs only work with set_ftrace_filter */
944 +        if (!enable)
945 +                return -EINVAL;
946 +
947 +        ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
948 +
949 +        if (glob[0] == '!') {
950 +                ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
951 +                if (!ret)
952 +                        tracing_disarm_snapshot(tr);
953 +
954 +                return ret;
955 +        }
956 +
957 +        if (!param)
958 +                goto out_reg;
959 +
960 +        number = strsep(&param, ":");
961 +
962 +        if (!strlen(number))
963 +                goto out_reg;
964 +
965 +        /*
966 +         * We use the callback data field (which is a pointer)
967 +         * as our counter.
968 +         */
969 +        ret = kstrtoul(number, 0, (unsigned long *)&count);
970 +        if (ret)
971 +                return ret;
972 +
973 + out_reg:
974 +        ret = tracing_arm_snapshot(tr);
975 +        if (ret < 0)
976 +                return ret;
977 +
978 +        ret = register_ftrace_function_probe(glob, tr, ops, count);
979 +        if (ret < 0)
980 +                tracing_disarm_snapshot(tr);
981 +
982 +        return ret < 0 ? ret : 0;
983 + }
984 +
985 + static struct ftrace_func_command ftrace_snapshot_cmd = {
986 +        .name                   = "snapshot",
987 +        .func                   = ftrace_trace_snapshot_callback,
988 + };
989 +
990 + __init int register_snapshot_cmd(void)
991 + {
992 +        return register_ftrace_command(&ftrace_snapshot_cmd);
993 + }
994 + #endif /* CONFIG_DYNAMIC_FTRACE */
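The "snapshot" command registered by register_snapshot_cmd() is driven from user space through set_ftrace_filter. A sketch arming a one-shot snapshot on the first call to schedule() (the usual tracefs mount point is assumed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *cmd = "schedule:snapshot:1";
        int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

        if (fd < 0)
                return 1;

        /* parsed by ftrace_trace_snapshot_callback(): one snapshot, then done */
        write(fd, cmd, strlen(cmd));
        close(fd);
        return 0;
}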
995 +
996 + int trace_allocate_snapshot(struct trace_array *tr, int size)
997 + {
998 +        int ret;
999 +
1000 +        /* Fixed-address (boot-mapped) buffer trace arrays do not have snapshot buffers */
1001 +        if (tr->range_addr_start)
1002 +                return 0;
1003 +
1004 +        /* allocate_snapshot can only be true during system boot */
1005 +        ret = allocate_trace_buffer(tr, &tr->snapshot_buffer,
1006 +                                    allocate_snapshot ? size : 1);
1007 +        if (ret < 0)
1008 +                return -ENOMEM;
1009 +
1010 +        tr->allocated_snapshot = allocate_snapshot;
1011 +
1012 +        allocate_snapshot = false;
1013 +        return 0;
1014 + }
1015 +
1016 + __init static bool tr_needs_alloc_snapshot(const char *name)
1017 + {
1018 +        char *test;
1019 +        int len = strlen(name);
1020 +        bool ret;
1021 +
1022 +        if (!boot_snapshot_index)
1023 +                return false;
1024 +
1025 +        if (strncmp(name, boot_snapshot_info, len) == 0 &&
1026 +            boot_snapshot_info[len] == '\t')
1027 +                return true;
1028 +
1029 +        test = kmalloc(strlen(name) + 3, GFP_KERNEL);
1030 +        if (!test)
1031 +                return false;
1032 +
1033 +        sprintf(test, "\t%s\t", name);
1034 +        ret = strstr(boot_snapshot_info, test) != NULL; /* listed => needs a snapshot */
1035 +        kfree(test);
1036 +        return ret;
1037 + }
1038 +
1039 + __init void do_allocate_snapshot(const char *name)
1040 + {
1041 +        if (!tr_needs_alloc_snapshot(name))
1042 +                return;
1043 +
1044 +        /*
1045 +         * When allocate_snapshot is set, the next call to
1046 +         * allocate_trace_buffers() (called by trace_array_get_by_name())
1047 +         * will allocate the snapshot buffer. That will also clear
1048 +         * this flag.
1049 +         */
1050 +        allocate_snapshot = true;
1051 + }
1052 +
1053 + void __init ftrace_boot_snapshot(void)
1054 + {
1055 +        struct trace_array *tr;
1056 +
1057 +        if (!snapshot_at_boot)
1058 +                return;
1059 +
1060 +        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1061 +                if (!tr->allocated_snapshot)
1062 +                        continue;
1063 +
1064 +                tracing_snapshot_instance(tr);
1065 +                trace_array_puts(tr, "** Boot snapshot taken **\n");
1066 +        }
1067 + }
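Putting the boot-time pieces together, the __setup() handlers at the top of this file and ftrace_boot_snapshot() combine so that command lines like the following should work; the instance name "foo" is illustrative, and the instance still needs something enabled in it to have data worth snapshotting:

# snapshot the top-level buffer once boot finishes
ftrace_boot_snapshot

# create instance "foo" and snapshot only that instance at boot
trace_instance=foo ftrace_boot_snapshot=foo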