sched: use highest_prio.curr for pull threshold

highest_prio.curr is actually a more accurate way to keep track of
the pull_rt_task() threshold, since it is always up to date even if
the "next" task migrates while the lock is dropped in
double_lock_balance().  Therefore, stop looking at the "next" task
object and simply use highest_prio.curr.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
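
For illustration, the gist of the change is the pull threshold itself:
rather than caching a "next" task pointer that can go stale whenever
double_lock_balance() drops this_rq's lock, the comparison reads
this_rq->rt.highest_prio.curr, which the runqueue keeps current on every
enqueue/dequeue.  Below is a minimal userspace sketch of the two
comparisons; the struct layout and helper names are simplified stand-ins
for illustration, not the kernel's actual types:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; only the fields the
 * comparison needs.  Lower prio value == higher priority, as in the kernel. */
struct task {
	int prio;
};

struct rq {
	struct {
		struct {
			int curr;	/* prio of the highest-prio queued RT task */
		} highest_prio;
	} rt;
};

/* Old scheme: compare against a cached "next" pointer, which is stale if
 * "next" migrated away while this_rq's lock was dropped. */
static int should_pull_cached(struct task *p, struct task *next)
{
	return p && (!next || p->prio < next->prio);
}

/* New scheme: compare against the live highest_prio.curr, which is updated
 * on every enqueue/dequeue and so survives the lock being dropped. */
static int should_pull_live(struct task *p, struct rq *this_rq)
{
	return p && p->prio < this_rq->rt.highest_prio.curr;
}

int main(void)
{
	struct rq this_rq = { .rt = { .highest_prio = { .curr = 50 } } };
	struct task next = { .prio = 50 };
	struct task p = { .prio = 10 };

	/* Both agree while nothing migrates: prio 10 beats prio 50. */
	printf("cached: %d  live: %d\n",
	       should_pull_cached(&p, &next),
	       should_pull_live(&p, &this_rq));		/* cached: 1  live: 1 */

	/* A prio-5 task enqueued while the lock was dropped is reflected
	 * immediately in highest_prio.curr, so the pull is correctly
	 * declined; the cached "next" pointer would not notice. */
	this_rq.rt.highest_prio.curr = 5;
	printf("cached: %d  live: %d\n",
	       should_pull_cached(&p, &next),
	       should_pull_live(&p, &this_rq));		/* cached: 1  live: 0 */
	return 0;
}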
commit a8728944ef
parent e864c499d9
Author: Gregory Haskins <ghaskins@novell.com>
Date:   2008-12-29 09:39:49 -05:00

kernel/sched_rt.c

@@ -1207,14 +1207,12 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
@@ -1223,17 +1221,9 @@ static int pull_rt_task(struct rq *this_rq)
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);
 
 		/*
 		 * Are there still pullable RT tasks?
@@ -1247,7 +1237,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);
 
@@ -1257,12 +1247,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is wakeing up and hasn't
 			 * had a chance to schedule. We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
 			ret = 1;
@@ -1275,13 +1262,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-
-			next = p;
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
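
As a side note, the reason highest_prio.curr can serve as the threshold
is that the RT runqueue updates it eagerly as tasks are queued and
dequeued; in the real kernel that bookkeeping is done against the prio
array in kernel/sched_rt.c.  A self-contained sketch of the invariant
follows; the per-prio counters and helper names here are illustrative
assumptions, not the kernel's implementation:

#include <stdio.h>

#define MAX_RT_PRIO 100	/* RT prios run 0..99; lower value == higher prio */

struct rt_rq {
	int nr_at_prio[MAX_RT_PRIO];	/* stand-in for the kernel's prio bitmap */
	struct {
		int curr;
	} highest_prio;
};

/* Scan for the best (numerically lowest) prio that still has tasks queued. */
static void recompute_highest(struct rt_rq *rt)
{
	for (int prio = 0; prio < MAX_RT_PRIO; prio++) {
		if (rt->nr_at_prio[prio]) {
			rt->highest_prio.curr = prio;
			return;
		}
	}
	rt->highest_prio.curr = MAX_RT_PRIO;	/* empty: sentinel above any RT prio */
}

static void enqueue_rt(struct rt_rq *rt, int prio)
{
	rt->nr_at_prio[prio]++;
	if (prio < rt->highest_prio.curr)
		rt->highest_prio.curr = prio;	/* cheap fast path on enqueue */
}

static void dequeue_rt(struct rt_rq *rt, int prio)
{
	rt->nr_at_prio[prio]--;
	if (prio == rt->highest_prio.curr)
		recompute_highest(rt);		/* dequeue may require a rescan */
}

int main(void)
{
	struct rt_rq rt = { .highest_prio = { .curr = MAX_RT_PRIO } };

	enqueue_rt(&rt, 40);
	enqueue_rt(&rt, 10);
	printf("highest: %d\n", rt.highest_prio.curr);	/* 10 */
	dequeue_rt(&rt, 10);
	printf("highest: %d\n", rt.highest_prio.curr);	/* 40 */
	return 0;
}

Because the field is maintained at enqueue/dequeue time, any task that
arrives on this_rq while pull_rt_task() has the lock dropped in
double_lock_balance() is already reflected in highest_prio.curr by the
time the comparison runs, which is exactly why the cached "next" pointer
became unnecessary.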