perf/core: Fix installing cgroup events on CPU
There are two problems when installing cgroup events on CPUs: firstly, list_update_cgroup_event() only tries to set cpuctx->cgrp for the first event; if that mismatches on @cgrp, we will not try again for later additions.

Secondly, when we install a cgroup event into an active context, only issue an event reprogram when the event matches the current cgroup context. This avoids pointless event reprogramming.

Signed-off-by: leilei.lin <leilei.lin@alibaba-inc.com>
[ Improved the changelog and comments. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: brendan.d.gregg@gmail.com
Cc: eranian@gmail.com
Cc: linux-kernel@vger.kernel.org
Cc: yang_oliver@hotmail.com
Link: http://lkml.kernel.org/r/20180306093637.28247-1-linxiulei@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8d5bce0c37
commit 33801b9474
1 changed file with 35 additions and 11 deletions
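The first problem can be seen in a minimal user-space sketch (illustrative only: struct cpu_context, matches_current() and the add_event_*() helpers below are simplified stand-ins, not kernel APIs). With the old logic only the first added cgroup event may set cpuctx->cgrp, so a mismatching first event leaves the pointer unset for all later events; the fixed logic retries for every event while the pointer is still unset.

/*
 * Illustrative user-space model of the cpuctx->cgrp bookkeeping problem.
 * All types and helpers are simplified stand-ins, not kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct cgroup { int id; };

struct cpu_context {
        struct cgroup *cgrp;   /* cgroup whose events may currently run */
        int nr_cgroups;        /* number of cgroup events in the context */
};

/* Stand-in for the kernel's cgroup match: here, a plain id comparison. */
static bool matches_current(const struct cgroup *event_cgrp,
                            const struct cgroup *current_cgrp)
{
        return event_cgrp->id == current_cgrp->id;
}

/* Old behaviour: only the first added event gets a chance to set cgrp. */
static void add_event_old(struct cpu_context *cpuctx,
                          struct cgroup *event_cgrp,
                          struct cgroup *current_cgrp)
{
        if (cpuctx->nr_cgroups++)      /* not the first event: bail out */
                return;
        if (matches_current(event_cgrp, current_cgrp))
                cpuctx->cgrp = current_cgrp;
}

/* Fixed behaviour: retry for every event while cgrp is still unset. */
static void add_event_new(struct cpu_context *cpuctx,
                          struct cgroup *event_cgrp,
                          struct cgroup *current_cgrp)
{
        if (!cpuctx->cgrp && matches_current(event_cgrp, current_cgrp))
                cpuctx->cgrp = current_cgrp;
        cpuctx->nr_cgroups++;
}

int main(void)
{
        struct cgroup other = { .id = 1 }, current_cgrp = { .id = 2 };
        struct cpu_context old_ctx = { 0 }, new_ctx = { 0 };

        /* First event belongs to a non-matching cgroup, second one matches. */
        add_event_old(&old_ctx, &other, &current_cgrp);
        add_event_old(&old_ctx, &current_cgrp, &current_cgrp);

        add_event_new(&new_ctx, &other, &current_cgrp);
        add_event_new(&new_ctx, &current_cgrp, &current_cgrp);

        printf("old logic: cgrp %s set\n", old_ctx.cgrp ? "is" : "is NOT");
        printf("new logic: cgrp %s set\n", new_ctx.cgrp ? "is" : "is NOT");
        return 0;
}

Run standalone, the model prints that the old logic leaves the cgroup pointer unset while the fixed logic sets it once the matching event arrives.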
@@ -937,27 +937,39 @@ list_update_cgroup_event(struct perf_event *event,
         if (!is_cgroup_event(event))
                 return;
 
-        if (add && ctx->nr_cgroups++)
-                return;
-        else if (!add && --ctx->nr_cgroups)
-                return;
         /*
          * Because cgroup events are always per-cpu events,
          * this will always be called from the right CPU.
          */
         cpuctx = __get_cpu_context(ctx);
-        cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-        /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
-        if (add) {
+
+        /*
+         * Since setting cpuctx->cgrp is conditional on the current @cgrp
+         * matching the event's cgroup, we must do this for every new event,
+         * because if the first would mismatch, the second would not try again
+         * and we would leave cpuctx->cgrp unset.
+         */
+        if (add && !cpuctx->cgrp) {
                 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
 
-                list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
                 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                         cpuctx->cgrp = cgrp;
-        } else {
-                list_del(cpuctx_entry);
-                cpuctx->cgrp = NULL;
         }
+
+        if (add && ctx->nr_cgroups++)
+                return;
+        else if (!add && --ctx->nr_cgroups)
+                return;
+
+        /* no cgroup running */
+        if (!add)
+                cpuctx->cgrp = NULL;
+
+        cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+        if (add)
+                list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+        else
+                list_del(cpuctx_entry);
 }
 
 #else /* !CONFIG_CGROUP_PERF */
@@ -2489,6 +2501,18 @@ static int __perf_install_in_context(void *info)
                 raw_spin_lock(&task_ctx->lock);
         }
 
+#ifdef CONFIG_CGROUP_PERF
+        if (is_cgroup_event(event)) {
+                /*
+                 * If the current cgroup doesn't match the event's
+                 * cgroup, we should not try to schedule it.
+                 */
+                struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+                reprogram = cgroup_is_descendant(cgrp->css.cgroup,
+                                                 event->cgrp->css.cgroup);
+        }
+#endif
+
         if (reprogram) {
                 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
                 add_event_to_ctx(event, ctx);
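The second change can likewise be sketched in isolation (again illustrative only: this struct cgroup and cgroup_is_descendant() are simplified user-space stand-ins for the kernel's cgroup helpers). Installation only reprograms the context when the CPU's current cgroup is the event's cgroup or one of its descendants.

/*
 * Illustrative sketch of the reprogram decision: reprogram only when the
 * current cgroup sits at or below the event's cgroup in the hierarchy.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cgroup {
        const char *name;
        const struct cgroup *parent;   /* NULL for the root cgroup */
};

/* Simplified stand-in for cgroup_is_descendant(): walk the parent chain. */
static bool cgroup_is_descendant(const struct cgroup *cgrp,
                                 const struct cgroup *ancestor)
{
        for (; cgrp; cgrp = cgrp->parent)
                if (cgrp == ancestor)
                        return true;
        return false;
}

int main(void)
{
        struct cgroup root  = { "/",         NULL  };
        struct cgroup apps  = { "/apps",     &root };
        struct cgroup web   = { "/apps/web", &apps };
        struct cgroup batch = { "/batch",    &root };

        /* The CPU is currently running a task from /apps/web. */
        const struct cgroup *current_cgrp = &web;

        /* An event attached to /apps matches (current is a descendant)... */
        bool reprogram = cgroup_is_descendant(current_cgrp, &apps);
        printf("event on %s: reprogram=%d\n", apps.name, reprogram);

        /* ...while an event attached to /batch does not, so no reprogram. */
        reprogram = cgroup_is_descendant(current_cgrp, &batch);
        printf("event on %s: reprogram=%d\n", batch.name, reprogram);
        return 0;
}

Run standalone, this prints reprogram=1 for the /apps event and reprogram=0 for the /batch event, mirroring the check __perf_install_in_context() now performs before touching the context.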