Merge branches 'timers-urgent-for-linus', 'irq-urgent-for-linus' and 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull {timer,irq,core} fixes from Thomas Gleixner:

 - timer: bug fix for a cpu hotplug race.

 - irq: single bugfix for a wrong return value, which prevents the calling function from invoking the software fallback.

 - core: bugfix which plugs two race conditions that can cause hotplug per-cpu threads to end up on the wrong cpu.

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hrtimer: Don't reinitialize a cpu_base lock on CPU_UP

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip: gic: fix irq_trigger return

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kthread: Prevent unpark race which puts threads on the wrong cpu
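For context, the race the core fix plugs, reconstructed from the changes below (a hedged sketch: the wakeup source and exact timing are illustrative, not taken from the changelog):

/*
 * Before this merge a parked per-cpu kthread slept in TASK_INTERRUPTIBLE:
 *
 *   other context                      per-cpu kthread bound to CPU1
 *   -------------                      -----------------------------
 *                                      __set_current_state(TASK_INTERRUPTIBLE);
 *                                      schedule();        // parked, off CPU
 *   stray wake_up_process(k);
 *                                      // any TASK_NORMAL wakeup resumes it
 *                                      // before unpark re-bound it to CPU1,
 *                                      // so it can run on the wrong cpu
 *
 * After: parked threads sleep in the new TASK_PARKED state, which
 * wake_up_process() (TASK_NORMAL) cannot wake. Only __kthread_unpark()
 * resumes them, via wake_up_state(k, TASK_PARKED), after __kthread_bind()
 * has restored the cpu affinity.
 */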
commit bb33db7a07
7 changed files with 48 additions and 32 deletions
drivers/irqchip/irq-gic.c

@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
 	if (gic_arch_extn.irq_retrigger)
 		return gic_arch_extn.irq_retrigger(d);
 
-	return -ENXIO;
+	/* the genirq layer expects 0 if we can't retrigger in hardware */
+	return 0;
 }
 
 #ifdef CONFIG_SMP
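Why returning -ENXIO was wrong: in the genirq resend path (kernel/irq/resend.c), a nonzero return from the chip's irq_retrigger() hook is taken to mean the interrupt was retriggered in hardware, so the software resend fallback is skipped. A compilable userspace toy model of that decision; hw_retrigger_broken/hw_retrigger_fixed and this simplified check_irq_resend() are illustrative stand-ins, not the kernel's code:

#include <stdio.h>

/* stand-in for a chip's irq_retrigger() hook; nonzero means "retriggered in HW" */
static int hw_retrigger_broken(void) { return -6; }	/* -ENXIO: truthy! */
static int hw_retrigger_fixed(void)  { return 0; }	/* "can't do it in HW" */

static void check_irq_resend(int (*retrigger)(void))
{
	/* genirq falls back to software resend only when the hook returns 0 */
	if (!retrigger())
		printf("scheduling software resend\n");
	else
		printf("assuming hardware retriggered - interrupt lost!\n");
}

int main(void)
{
	check_irq_resend(hw_retrigger_broken);	/* old GIC code: IRQ lost  */
	check_irq_resend(hw_retrigger_fixed);	/* fixed: SW fallback runs */
	return 0;
}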
fs/proc/array.c

@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
 	"x (dead)",		/*  64 */
 	"K (wakekill)",		/* 128 */
 	"W (waking)",		/* 256 */
+	"P (parked)",		/* 512 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
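The new slot matters because get_task_state() in the same file indexes task_state_array[] by the bit position of the task's state. A compilable userspace analogue of that lookup (assumes a single state bit is set, as the kernel arranges by masking; the loop mirrors fs/proc/array.c's approach):

#include <stdio.h>

/* userspace analogue of fs/proc/array.c: the index is the bit position
 * of the (single) state bit, plus one; "P (parked)" is the slot for 512 */
static const char * const task_state_array[] = {
	"R (running)",		/*   0 */
	"S (sleeping)",		/*   1 */
	"D (disk sleep)",	/*   2 */
	"T (stopped)",		/*   4 */
	"t (tracing stop)",	/*   8 */
	"Z (zombie)",		/*  16 */
	"X (dead)",		/*  32 */
	"x (dead)",		/*  64 */
	"K (wakekill)",		/* 128 */
	"W (waking)",		/* 256 */
	"P (parked)",		/* 512 */
};

static const char *get_task_state(unsigned int state)
{
	const char * const *p = &task_state_array[0];

	while (state) {		/* shift until the set bit falls off */
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", get_task_state(512));	/* prints "P (parked)" */
	return 0;
}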
include/linux/sched.h

@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
 #define TASK_WAKING		256
-#define TASK_STATE_MAX		512
+#define TASK_PARKED		512
+#define TASK_STATE_MAX		1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
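The ___assert_task_state declaration is a pre-C11 compile-time assertion: the array size evaluates to -1, a compile error, whenever the state string and TASK_STATE_MAX fall out of sync, which is exactly why both are updated together here. A standalone demo of the same trick; the names and the hard-coded ilog2 value are illustrative:

#include <stdio.h>

#define STATE_MAX	1024		/* one past the highest state bit */
#define STATE_STR	"RSDTtZXxKWP"	/* one letter per state bit */
#define ILOG2_STATE_MAX	10		/* ilog2(1024), hard-coded for the demo */

/* size is 1 when the string matches, -1 (compile error) when it doesn't */
extern char assert_state[1 - 2*!!(sizeof(STATE_STR)-1 != ILOG2_STATE_MAX+1)];

int main(void)
{
	printf("%zu state letters for %d state bits\n",
	       sizeof(STATE_STR) - 1, ILOG2_STATE_MAX + 1);
	return 0;
}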
include/trace/events/sched.h

@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
 		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
 				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
 				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
-				{ 128, "W" }) : "R",
+				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
 		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
 		__entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
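Note the old table also mislabeled bit 128 (TASK_WAKEKILL) as "W"; the new one prints "K" for it and adds "W" (waking) and "P" (parked). A compilable sketch of the bit-to-letter decode that __print_flags() performs here (simplified; not the tracing implementation itself):

#include <stdio.h>
#include <stddef.h>

struct flag_name { unsigned long mask; const char *name; };

/* mirrors the updated table in the sched_switch trace event */
static const struct flag_name tbl[] = {
	{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
	{ 16, "Z" }, { 32, "X" }, { 64, "x" },
	{ 128, "K" }, { 256, "W" }, { 512, "P" },
};

static void print_state(unsigned long state)
{
	const char *sep = "";
	size_t i;

	if (!state) {
		puts("R");		/* running: no bits set */
		return;
	}
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (state & tbl[i].mask) {
			printf("%s%s", sep, tbl[i].name);
			sep = "|";
		}
	}
	putchar('\n');
}

int main(void)
{
	print_state(512);	/* "P" - decodable only with the new entries */
	return 0;
}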
kernel/hrtimer.c

@@ -63,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
 	.clock_base =
 	{
 		{
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	raw_spin_lock_init(&cpu_base->lock);
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
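init_hrtimers_cpu() runs on every CPU_UP, so the removed raw_spin_lock_init() could re-initialize a cpu_base lock at the very moment another CPU held it, silently resetting it to unlocked; the fix initializes the lock exactly once, statically. A userspace analogue of the two patterns (pthread stand-ins, illustrative only):

#include <pthread.h>

/* fixed pattern: initialize exactly once, statically - the userspace
 * analogue of the __RAW_SPIN_LOCK_UNLOCKED() initializer added above */
static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

/* buggy pattern: the analogue of calling raw_spin_lock_init() on every
 * CPU_UP - re-initializing the lock while another thread holds it resets
 * it to "unlocked", letting two owners into the critical section */
static void on_cpu_up(void)
{
	pthread_mutex_init(&base_lock, NULL);	/* never re-init a live lock */
}

int main(void)
{
	on_cpu_up();		/* harmless only because nobody holds the lock */
	pthread_mutex_lock(&base_lock);
	/* calling on_cpu_up() here would corrupt the held lock */
	pthread_mutex_unlock(&base_lock);
	return 0;
}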
kernel/kthread.c

@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
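The state change works because wake_up_process() wakes TASK_NORMAL (interruptible or uninterruptible) sleepers only, so a stray wakeup can no longer resume a parked thread before its affinity is restored. The waker side, from the __kthread_unpark() hunk further down (kernel context, not standalone code):

wake_up_process(k);		/* TASK_NORMAL only: a TASK_PARKED thread stays asleep */
wake_up_state(k, TASK_PARKED);	/* the only wakeup that resumes a parked thread;
				 * called after __kthread_bind() restored affinity */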
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, state)) {
+		WARN_ON(1);
+		return;
+	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	__kthread_bind(p, cpu);
+	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
 	return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	/*
+	 * We clear the IS_PARKED bit here as we don't wait
+	 * until the task has left the park code. So if we'd
+	 * park before that happens we'd see the IS_PARKED bit
+	 * which might be about to be cleared.
+	 */
+	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		wake_up_state(k, TASK_PARKED);
+	}
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k: thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = task_get_live_kthread(k);
 
-	if (kthread) {
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		/*
-		 * We clear the IS_PARKED bit here as we don't wait
-		 * until the task has left the park code. So if we'd
-		 * park before that happens we'd see the IS_PARKED bit
-		 * which might be about to be cleared.
-		 */
-		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-				__kthread_bind(k, kthread->cpu);
-			wake_up_process(k);
-		}
-	}
+	if (kthread)
+		__kthread_unpark(k, kthread);
 	put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
 	trace_sched_kthread_stop(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
kernel/smpboot.c

@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 	}
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
-	if (ht->create)
-		ht->create(cpu);
+	if (ht->create) {
+		/*
+		 * Make sure that the task has actually scheduled out
+		 * into park position, before calling the create
+		 * callback. At least the migration thread callback
+		 * requires that the task is off the runqueue.
+		 */
+		if (!wait_task_inactive(tsk, TASK_PARKED))
+			WARN_ON(1);
+		else
+			ht->create(cpu);
+	}
 	return 0;
 }
 
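For consumers nothing changes in the API; the create() callback simply gains the guarantee that the new thread has parked and left the runqueue before it runs. A hypothetical registration, for orientation only; the demo_* names are invented and the field set follows <linux/smpboot.h> in this tree, so treat it as an assumption to verify:

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu) { return 0; }
static void demo_fn(unsigned int cpu) { /* per-cpu work */ }
static void demo_create(unsigned int cpu)
{
	/* after this merge: runs only once the thread sits in TASK_PARKED */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.create			= demo_create,
	.thread_comm		= "demo/%u",
};

/* in init code: smpboot_register_percpu_thread(&demo_threads); */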