sched: add rq_clock()/__rq_clock()
add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(), used by CFS. It protects against common types of sched_clock() problems (caused by hardware): time warps, both forwards and backwards.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6aa645ea5f
commit 20d315d42a

1 changed file with 46 additions and 0 deletions
@@ -388,6 +388,52 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+/*
+ * Per-runqueue clock, as finegrained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+	u64 prev_raw = rq->prev_clock_raw;
+	u64 now = sched_clock();
+	s64 delta = now - prev_raw;
+	u64 clock = rq->clock;
+
+	/*
+	 * Protect against sched_clock() occasionally going backwards:
+	 */
+	if (unlikely(delta < 0)) {
+		clock++;
+		rq->clock_warps++;
+	} else {
+		/*
+		 * Catch too large forward jumps too:
+		 */
+		if (unlikely(delta > 2*TICK_NSEC)) {
+			clock++;
+			rq->clock_overflows++;
+		} else {
+			if (unlikely(delta > rq->clock_max_delta))
+				rq->clock_max_delta = delta;
+			clock += delta;
+		}
+	}
+
+	rq->prev_clock_raw = now;
+	rq->clock = clock;
+
+	return clock;
+}
+
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+	int this_cpu = smp_processor_id();
+
+	if (this_cpu == cpu_of(rq))
+		return __rq_clock(rq);
+
+	return rq->clock;
+}
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.