clocksource: Avoid clocksource watchdog circular locking dependency
stop_machine from a multithreaded workqueue is not allowed because of a
circular locking dependency between cpu_down and the workqueue execution.
Use a kernel thread to do the clocksource downgrade.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: john stultz <johnstul@us.ibm.com>
LKML-Reference: <20090818170942.3ab80c91@skybase>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d0981a1b21
commit 01548f4d3e

1 changed file with 16 additions and 3 deletions
kernel/time/clocksource.c

@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
 #include <linux/tick.h>
+#include <linux/kthread.h>
 
 void timecounter_init(struct timecounter *tc,
 		      const struct cyclecounter *cc,
@@ -130,7 +131,7 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
 static int watchdog_running;
 
-static void clocksource_watchdog_work(struct work_struct *work);
+static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
 /*
@@ -139,6 +140,15 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void clocksource_unstable(struct clocksource *cs, int64_t delta)
 {
 	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
@@ -167,8 +177,10 @@ static void clocksource_watchdog(unsigned long data)
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
 
 		/* Clocksource already marked unstable? */
-		if (cs->flags & CLOCK_SOURCE_UNSTABLE)
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
+			schedule_work(&watchdog_work);
 			continue;
+		}
 
 		csnow = cs->read(cs);
 
@@ -304,7 +316,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -327,6 +339,7 @@ static void clocksource_watchdog_work(struct work_struct *work)
 		__clocksource_change_rating(cs, 0);
 	}
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
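The pattern the patch lands on is worth seeing in isolation: the timer-driven watchdog only ever calls schedule_work(), and the work handler does nothing but spawn a one-shot kernel thread, so anything that needs stop_machine() runs outside the shared workqueue. Below is a minimal, hypothetical module sketch of that handoff; the demo_* names are illustrative and not part of the patch.

/*
 * Hypothetical sketch, not from the patch: defer heavy work from a
 * shared workqueue to a one-shot kernel thread.
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static int demo_kthread(void *data)
{
	/*
	 * Runs in its own thread, not on a shared workqueue worker,
	 * so it may block or call stop_machine() without creating a
	 * lock dependency against cpu_down()/workqueue flushing.
	 */
	pr_info("demo: heavy work running in its own kthread\n");
	return 0;
}

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * Keep the work handler trivial: just spawn the thread. If
	 * kthread_run() fails, the next trigger reschedules the work,
	 * mirroring how the watchdog's next scan finds the unstable
	 * clocksource again.
	 */
	kthread_run(demo_kthread, NULL, "demo_kthread");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* safe even from timer context */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note the failure handling: if kthread_run() fails, nothing is lost, because the next trigger simply schedules the work again. That is exactly how the patched watchdog recovers, as its next scan over watchdog_list finds the still-unstable clocksource and reschedules watchdog_work.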