Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

- Pass only an initialized perf event attribute to the LSM hook (a short sketch of the reordering follows the shortlog below)

- Fix a use-after-free on the perf syscall's error path

- A potential integer overflow fix in amd_core_pmu_init()

- Fix the cgroup events tracking after the context handling rewrite

- Return the proper value from the inherit_event() function on error

* tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Call LSM hook after copying perf_event_attr
perf: Fix use-after-free in error path
perf/x86/amd: fix potential integer overflow on shift of a int
perf/core: Fix cgroup events tracking
perf core: Return error pointer if inherit_event() fails to find pmu_ctx
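
The LSM fix listed first is purely an ordering change: perf_event_open() now copies the perf_event_attr in from userspace before handing it to the security hook, so security_perf_event_open() can no longer observe an uninitialized on-stack structure. A minimal sketch of the corrected ordering, condensed from the kernel/events/core.c hunk further down (everything around the two checks is omitted):

	struct perf_event_attr attr;	/* on-stack; garbage until copied in */
	int err;

	/* Copy the attribute from userspace first ... */
	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	/* ... so the LSM hook only ever inspects an initialized attr. */
	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
	if (err)
		return err;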

2 files changed, +18 -38

arch/x86/events/amd/core.c (+1 -1)
···
 		 * numbered counter following it.
 		 */
 		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
-			even_ctr_mask |= 1 << i;
+			even_ctr_mask |= BIT_ULL(i);

 		pair_constraint = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
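
This one-line change matters because the mask being built (even_ctr_mask) is a 64-bit quantity while 1 << i is evaluated as a plain int, so the shift overflows once i reaches the width of int. A small userspace sketch of the difference; BIT_ULL() is reimplemented here the way the kernel effectively defines it, and the loop bound of 40 is invented for the demo (the real loop runs to x86_pmu.num_counters - 1):

#include <stdio.h>
#include <stdint.h>

/* In the kernel, BIT_ULL(nr) expands to (1ULL << (nr)): the shift is done
 * in 64 bits, so any nr in [0, 63] is well defined. */
#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	uint64_t mask_int = 0, mask_ull = 0;

	for (int i = 0; i < 40; i += 2) {
		if (i < 31)			/* keep the 32-bit shift defined for the demo */
			mask_int |= 1 << i;	/* int shift: bits 31..63 unreachable */
		mask_ull |= BIT_ULL(i);		/* 64-bit shift: all even bits reachable */
	}

	printf("1 << i  : %#018llx\n", (unsigned long long)mask_int);
	printf("BIT_ULL : %#018llx\n", (unsigned long long)mask_ull);
	return 0;
}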

kernel/events/core.c (+17 -37)
···

 /*
  * perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */

 static void perf_sched_delayed(struct work_struct *work);
···
 static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;

-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

 static atomic_t nr_mmap_events __read_mostly;
···
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_cgroup *cgrp;

-	cgrp = perf_cgroup_from_task(task, NULL);
+	/*
+	 * cpuctx->cgrp is set when the first cgroup event enabled,
+	 * and is cleared when the last cgroup event disabled.
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;

 	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+	cgrp = perf_cgroup_from_task(task, NULL);
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
···
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(next);
+	perf_cgroup_switch(next);
 }

 static bool perf_less_group_idx(const void *l, const void *r)
···
 	detach_sb_event(event);
 }

-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 static DEFINE_SPINLOCK(nr_freq_lock);
 #endif
···
 		if (!atomic_add_unless(&perf_sched_count, -1, 1))
 			schedule_delayed_work(&perf_sched_work, HZ);
 	}
-
-	unaccount_event_cpu(event, event->cpu);

 	unaccount_pmu_sb_event(event);
 }
···
 		attach_sb_event(event);
 }

-static void account_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
 static void account_freq_event_nohz(void)
 {
···
 		mutex_unlock(&perf_sched_mutex);
 	}
 enabled:
-
-	account_event_cpu(event, event->cpu);

 	account_pmu_sb_event(event);
 }
···
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;

-	/* Do we allow access to perf_event_open(2) ? */
-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;

-	err = perf_copy_attr(attr_uptr, &attr);
+	/* Do we allow access to perf_event_open(2) ? */
+	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
 		return err;
···
 	return event_fd;

 err_context:
-	/* event->pmu_ctx freed by free_event() */
+	put_pmu_ctx(event->pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_locked:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
···

 err_pmu_ctx:
 	put_pmu_ctx(pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_unlock:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
···

 	perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
 		perf_remove_from_context(event, 0);
-		unaccount_event_cpu(event, cpu);
 		put_pmu_ctx(event->pmu_ctx);
 		list_add(&event->migrate_entry, events);

 		for_each_sibling_event(sibling, event) {
 			perf_remove_from_context(sibling, 0);
-			unaccount_event_cpu(sibling, cpu);
 			put_pmu_ctx(sibling->pmu_ctx);
 			list_add(&sibling->migrate_entry, events);
 		}
···

 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
-		account_event_cpu(event, cpu);
 		perf_install_in_context(ctx, event, cpu);
 	}
···
 	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
 	if (IS_ERR(pmu_ctx)) {
 		free_event(child_event);
-		return NULL;
+		return ERR_CAST(pmu_ctx);
 	}
 	child_event->pmu_ctx = pmu_ctx;
···
 	struct task_struct *task = info;

 	preempt_disable();
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(task);
+	perf_cgroup_switch(task);
 	preempt_enable();

 	return 0;
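
The inherit_event() hunk above (return NULL changed to return ERR_CAST(pmu_ctx)) relies on the kernel's error-pointer convention: its callers check IS_ERR(), so returning NULL on failure reads as "nothing to inherit" and silently drops the error from find_get_pmu_context(). A self-contained userspace sketch of that convention; the helper definitions mirror include/linux/err.h, while find_ctx() and inherit() are invented stand-ins for the demo:

#include <stdio.h>
#include <errno.h>

/* Userspace stand-ins for the kernel's error-pointer helpers: an errno
 * value is encoded in the last page of the pointer space, so one return
 * value can mean "valid object", NULL, or "-ESOMETHING". */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct pmu_ctx { int unused; };
struct child_event { int unused; };

/* Invented stand-in for find_get_pmu_context() failing with -ENOMEM. */
static struct pmu_ctx *find_ctx(void)
{
	return ERR_PTR(-ENOMEM);
}

/* Like the fixed inherit_event(): propagate the error pointer instead of
 * collapsing it to NULL, which the caller would treat as "nothing to do". */
static struct child_event *inherit(void)
{
	struct pmu_ctx *ctx = find_ctx();

	if (IS_ERR(ctx))
		return ERR_CAST(ctx);	/* before the fix: return NULL */
	return NULL;
}

int main(void)
{
	struct child_event *ev = inherit();

	if (IS_ERR(ev))
		printf("inherit failed: %ld\n", PTR_ERR(ev));	/* -12 (ENOMEM) */
	else
		printf("nothing inherited (ev = %p)\n", (void *)ev);
	return 0;
}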