trace_clock: fix preemption bug
Using the function_graph tracer in recent kernels generates a spew of
preemption BUGs. Fix this by not requiring trace_clock_local() users
to disable preemption themselves.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2395037e5d
commit 6cc3c6e12b

1 changed file with 8 additions and 1 deletion
@@ -27,12 +27,19 @@
  */
 u64 notrace trace_clock_local(void)
 {
+	unsigned long flags;
+	u64 clock;
+
 	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
-	return sched_clock();
+	raw_local_irq_save(flags);
+	clock = sched_clock();
+	raw_local_irq_restore(flags);
+
+	return clock;
 }
 
 /*
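For context, a minimal sketch of a hypothetical call site (illustrative only; timestamp_event() is not part of this commit, and kernel context is assumed): before this change a preemptible caller was expected to disable preemption around the clock read itself, whereas afterwards a bare call suffices, since the raw_local_irq_save()/raw_local_irq_restore() pair inside trace_clock_local() already keeps sched_clock()'s per-CPU accesses on a single CPU.

/* Hypothetical call site, for illustration only (not from this commit). */
static u64 timestamp_event(void)
{
	/*
	 * Previously, a preemptible caller needed its own bracket:
	 *
	 *	preempt_disable();
	 *	stamp = trace_clock_local();
	 *	preempt_enable();
	 *
	 * With irqs disabled inside trace_clock_local(), a plain call
	 * is sufficient even from preemptible context.
	 */
	return trace_clock_local();
}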