Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
branch tracer, intel-iommu: fix build with CONFIG_BRANCH_TRACER=y
branch tracer: Fix for enabling branch profiling makes sparse unusable
ftrace: Correct a text align for event format output
Update /debug/tracing/README
tracing/ftrace: alloc the started cpumask for the trace file
tracing, x86: remove duplicated #include
ftrace: Add check of sched_stopped for probe_sched_wakeup
function-graph: add proper initialization for init task
tracing/ftrace: fix missing include string.h
tracing: fix incorrect return type of ns2usecs()
tracing: remove CALLER_ADDR2 from wakeup tracer
blktrace: fix pdu_len when tracing packet command requests
blktrace: small cleanup in blk_msg_write()
blktrace: NUL-terminate user space messages
tracing: move scripts/trace/power.pl to scripts/tracing/power.pl

+44 -16
-1
arch/x86/kernel/ftrace.c
··· 20 20 21 21 #include <asm/cacheflush.h> 22 22 #include <asm/ftrace.h> 23 - #include <linux/ftrace.h> 24 23 #include <asm/nops.h> 25 24 #include <asm/nmi.h> 26 25
+1
block/blk-core.c
··· 131 131 INIT_HLIST_NODE(&rq->hash); 132 132 RB_CLEAR_NODE(&rq->rb_node); 133 133 rq->cmd = rq->__cmd; 134 + rq->cmd_len = BLK_MAX_CDB; 134 135 rq->tag = -1; 135 136 rq->ref_count = 1; 136 137 }
+2 -1
include/linux/compiler.h
··· 76 76 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code 77 77 * to disable branch tracing on a per file basis. 78 78 */ 79 - #if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING) 79 + #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ 80 + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) 80 81 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); 81 82 82 83 #define likely_notrace(x) __builtin_expect(!!(x), 1)
+6 -2
include/linux/ftrace.h
··· 356 356 357 357 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 358 358 359 + /* for init task */ 360 + #define INIT_FTRACE_GRAPH .ret_stack = NULL 361 + 359 362 /* 360 363 * Stack of return addresses for functions 361 364 * of a thread. ··· 433 430 { 434 431 atomic_dec(&current->tracing_graph_pause); 435 432 } 436 - #else 433 + #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ 437 434 438 435 #define __notrace_funcgraph 439 436 #define __irq_entry 437 + #define INIT_FTRACE_GRAPH 440 438 441 439 static inline void ftrace_graph_init_task(struct task_struct *t) { } 442 440 static inline void ftrace_graph_exit_task(struct task_struct *t) { } ··· 449 445 450 446 static inline void pause_graph_tracing(void) { } 451 447 static inline void unpause_graph_tracing(void) { } 452 - #endif 448 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 453 449 454 450 #ifdef CONFIG_TRACING 455 451 #include <linux/sched.h>
+2
include/linux/init_task.h
··· 5 5 #include <linux/irqflags.h> 6 6 #include <linux/utsname.h> 7 7 #include <linux/lockdep.h> 8 + #include <linux/ftrace.h> 8 9 #include <linux/ipc.h> 9 10 #include <linux/pid_namespace.h> 10 11 #include <linux/user_namespace.h> ··· 186 185 INIT_IDS \ 187 186 INIT_TRACE_IRQFLAGS \ 188 187 INIT_LOCKDEP \ 188 + INIT_FTRACE_GRAPH \ 189 189 } 190 190 191 191
+4 -3
kernel/trace/blktrace.c
··· 327 327 char *msg; 328 328 struct blk_trace *bt; 329 329 330 - if (count > BLK_TN_MAX_MSG) 330 + if (count >= BLK_TN_MAX_MSG) 331 331 return -EINVAL; 332 332 333 - msg = kmalloc(count, GFP_KERNEL); 333 + msg = kmalloc(count + 1, GFP_KERNEL); 334 334 if (msg == NULL) 335 335 return -ENOMEM; 336 336 ··· 339 339 return -EFAULT; 340 340 } 341 341 342 + msg[count] = '\0'; 342 343 bt = filp->private_data; 343 344 __trace_note_message(bt, "%s", msg); 344 345 kfree(msg); ··· 643 642 if (blk_pc_request(rq)) { 644 643 what |= BLK_TC_ACT(BLK_TC_PC); 645 644 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, 646 - sizeof(rq->cmd), rq->cmd); 645 + rq->cmd_len, rq->cmd); 647 646 } else { 648 647 what |= BLK_TC_ACT(BLK_TC_FS); 649 648 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+16 -5
kernel/trace/trace.c
··· 30 30 #include <linux/percpu.h> 31 31 #include <linux/splice.h> 32 32 #include <linux/kdebug.h> 33 + #include <linux/string.h> 33 34 #include <linux/ctype.h> 34 35 #include <linux/init.h> 35 36 #include <linux/poll.h> ··· 148 147 } 149 148 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); 150 149 151 - long 152 - ns2usecs(cycle_t nsec) 150 + unsigned long long ns2usecs(cycle_t nsec) 153 151 { 154 152 nsec += 500; 155 153 do_div(nsec, 1000); ··· 1632 1632 return; 1633 1633 1634 1634 cpumask_set_cpu(iter->cpu, iter->started); 1635 - trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1635 + 1636 + /* Don't print started cpu buffer for the first entry of the trace */ 1637 + if (iter->idx > 1) 1638 + trace_seq_printf(s, "##### CPU %u buffer started ####\n", 1639 + iter->cpu); 1636 1640 } 1637 1641 1638 1642 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) ··· 1871 1867 if (current_trace) 1872 1868 *iter->trace = *current_trace; 1873 1869 1870 + if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) 1871 + goto fail; 1872 + 1873 + cpumask_clear(iter->started); 1874 + 1874 1875 if (current_trace && current_trace->print_max) 1875 1876 iter->tr = &max_tr; 1876 1877 else ··· 1926 1917 if (iter->buffer_iter[cpu]) 1927 1918 ring_buffer_read_finish(iter->buffer_iter[cpu]); 1928 1919 } 1920 + free_cpumask_var(iter->started); 1929 1921 fail: 1930 1922 mutex_unlock(&trace_types_lock); 1931 1923 kfree(iter->trace); ··· 1970 1960 1971 1961 seq_release(inode, file); 1972 1962 mutex_destroy(&iter->mutex); 1963 + free_cpumask_var(iter->started); 1973 1964 kfree(iter->trace); 1974 1965 kfree(iter); 1975 1966 return 0; ··· 2369 2358 "# mkdir /debug\n" 2370 2359 "# mount -t debugfs nodev /debug\n\n" 2371 2360 "# cat /debug/tracing/available_tracers\n" 2372 - "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n" 2361 + "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" 2373 2362 "# cat /debug/tracing/current_tracer\n" 2374 - "none\n" 2363 + "nop\n" 2375 2364 "# echo sched_switch > /debug/tracing/current_tracer\n" 2376 2365 "# cat /debug/tracing/current_tracer\n" 2377 2366 "sched_switch\n"
+1 -1
kernel/trace/trace.h
··· 602 602 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 603 603 604 604 extern void *head_page(struct trace_array_cpu *data); 605 - extern long ns2usecs(cycle_t nsec); 605 + extern unsigned long long ns2usecs(cycle_t nsec); 606 606 extern int 607 607 trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 608 608 extern int
+1 -1
kernel/trace/trace_export.c
··· 40 40 41 41 #undef TRACE_FIELD_ZERO_CHAR 42 42 #define TRACE_FIELD_ZERO_CHAR(item) \ 43 - ret = trace_seq_printf(s, "\tfield: char " #item ";\t" \ 43 + ret = trace_seq_printf(s, "\tfield:char " #item ";\t" \ 44 44 "offset:%u;\tsize:0;\n", \ 45 45 (unsigned int)offsetof(typeof(field), item)); \ 46 46 if (!ret) \
+1 -1
kernel/trace/trace_output.c
··· 423 423 424 424 trace_find_cmdline(entry->pid, comm); 425 425 426 - ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]" 426 + ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]" 427 427 " %ld.%03ldms (+%ld.%03ldms): ", comm, 428 428 entry->pid, iter->cpu, entry->flags, 429 429 entry->preempt_count, iter->idx,
+3
kernel/trace/trace_sched_switch.c
··· 62 62 pc = preempt_count(); 63 63 tracing_record_cmdline(current); 64 64 65 + if (sched_stopped) 66 + return; 67 + 65 68 local_irq_save(flags); 66 69 cpu = raw_smp_processor_id(); 67 70 data = ctx_trace->data[cpu];
+7 -1
kernel/trace/trace_sched_wakeup.c
··· 154 154 if (unlikely(!tracer_enabled || next != wakeup_task)) 155 155 goto out_unlock; 156 156 157 - trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 157 + trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); 158 158 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); 159 159 160 160 /* ··· 257 257 data = wakeup_trace->data[wakeup_cpu]; 258 258 data->preempt_timestamp = ftrace_now(cpu); 259 259 tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); 260 + 261 + /* 262 + * We must be careful in using CALLER_ADDR2. But since wake_up 263 + * is not called by an assembly function (where as schedule is) 264 + * it should be safe to use it here. 265 + */ 260 266 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 261 267 262 268 out_locked:
scripts/trace/power.pl scripts/tracing/power.pl