Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
ftrace: Revert 8ab2b7efd ("ftrace: Remove unnecessary disabling of irqs")
kprobes/trace: Fix kprobe selftest for gcc 4.6
ftrace: Fix possible undefined return code
oprofile, dcookies: Fix possible circular locking dependency
oprofile: Fix locking dependency in sync_start()
oprofile: Free potentially owned tasks in case of errors
oprofile, x86: Add comments to IBS LVT offset initialization

+39 -18
+2 -1
arch/x86/kernel/apic/apic.c
··· 390 390 391 391 /* 392 392 * If mask=1, the LVT entry does not generate interrupts while mask=0 393 - * enables the vector. See also the BKDGs. 393 + * enables the vector. See also the BKDGs. Must be called with 394 + * preemption disabled. 394 395 */ 395 396 396 397 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
+9 -4
arch/x86/oprofile/op_model_amd.c
··· 609 609 return 0; 610 610 } 611 611 612 + /* 613 + * This runs only on the current cpu. We try to find an LVT offset and 614 + * setup the local APIC. For this we must disable preemption. On 615 + * success we initialize all nodes with this offset. This updates then 616 + * the offset in the IBS_CTL per-node msr. The per-core APIC setup of 617 + * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_- 618 + * amd_cpu_shutdown() using the new offset. 619 + */ 612 620 static int force_ibs_eilvt_setup(void) 613 621 { 614 622 int offset; 615 623 int ret; 616 624 617 - /* 618 - * find the next free available EILVT entry, skip offset 0, 619 - * pin search to this cpu 620 - */ 621 625 preempt_disable(); 626 + /* find the next free available EILVT entry, skip offset 0 */ 622 627 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { 623 628 if (get_eilvt(offset)) 624 629 break;
+11 -10
drivers/oprofile/buffer_sync.c
··· 141 141 .notifier_call = module_load_notify, 142 142 }; 143 143 144 + static void free_all_tasks(void) 145 + { 146 + /* make sure we don't leak task structs */ 147 + process_task_mortuary(); 148 + process_task_mortuary(); 149 + } 150 + 144 151 int sync_start(void) 145 152 { 146 153 int err; 147 154 148 155 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 149 156 return -ENOMEM; 150 - 151 - mutex_lock(&buffer_mutex); 152 157 153 158 err = task_handoff_register(&task_free_nb); 154 159 if (err) ··· 171 166 start_cpu_work(); 172 167 173 168 out: 174 - mutex_unlock(&buffer_mutex); 175 169 return err; 176 170 out4: 177 171 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); ··· 178 174 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 179 175 out2: 180 176 task_handoff_unregister(&task_free_nb); 177 + free_all_tasks(); 181 178 out1: 182 179 free_cpumask_var(marked_cpus); 183 180 goto out; ··· 187 182 188 183 void sync_stop(void) 189 184 { 190 - /* flush buffers */ 191 - mutex_lock(&buffer_mutex); 192 185 end_cpu_work(); 193 186 unregister_module_notifier(&module_load_nb); 194 187 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 195 188 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 196 189 task_handoff_unregister(&task_free_nb); 197 - mutex_unlock(&buffer_mutex); 190 + barrier(); /* do all of the above first */ 191 + 198 192 flush_cpu_work(); 199 193 200 - /* make sure we don't leak task structs */ 201 - process_task_mortuary(); 202 - process_task_mortuary(); 203 - 194 + free_all_tasks(); 204 195 free_cpumask_var(marked_cpus); 205 196 } 206 197
+3
fs/dcookies.c
··· 178 178 /* FIXME: (deleted) ? */ 179 179 path = d_path(&dcs->path, kbuf, PAGE_SIZE); 180 180 181 + mutex_unlock(&dcookie_mutex); 182 + 181 183 if (IS_ERR(path)) { 182 184 err = PTR_ERR(path); 183 185 goto out_free; ··· 196 194 197 195 out_free: 198 196 kfree(kbuf); 197 + return err; 199 198 out: 200 199 mutex_unlock(&dcookie_mutex); 201 200 return err;
+8 -1
kernel/trace/ftrace.c
··· 2740 2740 { 2741 2741 char *func, *command, *next = buff; 2742 2742 struct ftrace_func_command *p; 2743 - int ret; 2743 + int ret = -EINVAL; 2744 2744 2745 2745 func = strsep(&next, ":"); 2746 2746 ··· 3330 3330 { 3331 3331 unsigned long *p; 3332 3332 unsigned long addr; 3333 + unsigned long flags; 3333 3334 3334 3335 mutex_lock(&ftrace_lock); 3335 3336 p = start; ··· 3347 3346 ftrace_record_ip(addr); 3348 3347 } 3349 3348 3349 + /* 3350 + * Disable interrupts to prevent interrupts from executing 3351 + * code that is being modified. 3352 + */ 3353 + local_irq_save(flags); 3350 3354 ftrace_update_code(mod); 3355 + local_irq_restore(flags); 3351 3356 mutex_unlock(&ftrace_lock); 3352 3357 3353 3358 return 0;
+6 -2
kernel/trace/trace_kprobe.c
··· 1870 1870 1871 1871 #ifdef CONFIG_FTRACE_STARTUP_TEST 1872 1872 1873 - static int kprobe_trace_selftest_target(int a1, int a2, int a3, 1874 - int a4, int a5, int a6) 1873 + /* 1874 + * The "__used" keeps gcc from removing the function symbol 1875 + * from the kallsyms table. 1876 + */ 1877 + static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, 1878 + int a4, int a5, int a6) 1875 1879 { 1876 1880 return a1 + a2 + a3 + a4 + a5 + a6; 1877 1881 }