perf, x86: Prefer fixed-purpose counters when scheduling
This avoids a scheduling failure for cases like: cycles, cycles, instructions, instructions (on Core2), which would end up being programmed like: PMC0, PMC1, FP-instructions, fail — because all events will have the same weight. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/n/tip-8tnwb92asqj7xajqqoty4gel@git.kernel.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
bc1738f6ee
commit
4defea8559
1 changed file with 14 additions and 5 deletions
|
@ -574,16 +574,25 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
|
||||||
|
|
||||||
c = sched->constraints[sched->state.event];
|
c = sched->constraints[sched->state.event];
|
||||||
|
|
||||||
|
/* Prefer fixed purpose counters */
|
||||||
|
if (x86_pmu.num_counters_fixed) {
|
||||||
|
idx = X86_PMC_IDX_FIXED;
|
||||||
|
for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
|
||||||
|
if (!__test_and_set_bit(idx, sched->state.used))
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
}
|
||||||
/* Grab the first unused counter starting with idx */
|
/* Grab the first unused counter starting with idx */
|
||||||
idx = sched->state.counter;
|
idx = sched->state.counter;
|
||||||
for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
|
for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
|
||||||
if (!__test_and_set_bit(idx, sched->state.used))
|
if (!__test_and_set_bit(idx, sched->state.used))
|
||||||
break;
|
goto done;
|
||||||
}
|
}
|
||||||
sched->state.counter = idx;
|
|
||||||
|
|
||||||
if (idx >= X86_PMC_IDX_MAX)
|
return false;
|
||||||
return false;
|
|
||||||
|
done:
|
||||||
|
sched->state.counter = idx;
|
||||||
|
|
||||||
if (c->overlap)
|
if (c->overlap)
|
||||||
perf_sched_save_state(sched);
|
perf_sched_save_state(sched);
|
||||||
|
|
Loading…
Reference in a new issue