sched: Migrate sched to use new tick dependency mask model
Instead of providing asynchronous checks for the nohz subsystem to verify
sched tick dependency, migrate sched to the new mask.

Every time a task is enqueued or dequeued, we evaluate the tick dependency
based on the policies of the tasks in the runqueue, in order of priority:

SCHED_DEADLINE: Needs the tick in order to periodically check for runtime
SCHED_FIFO    : Doesn't need the tick (no round-robin)
SCHED_RR      : Needs the tick if more than one task of the same priority
                is queued, for round-robin (simplified to checking whether
                more than one SCHED_RR task is queued, regardless of priority)
SCHED_NORMAL  : Needs the tick if more than one task is queued, for
                periodic preemption checks

We could optimize this further with one flag per sched policy on the tick
dependency mask, performing only the checks relevant to the policy
concerned by an enqueue/dequeue operation.

Since the checks are no longer based on the current task, we could get rid
of the task switch hook, but it is still needed for posix cpu timers.

Reviewed-by: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
commit 76d92ac305
parent 01d36d0ac3

4 changed files with 53 additions and 37 deletions
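The per-policy rules above amount to a small decision table. The following is a minimal stand-alone sketch of that table in user-space C, for illustration only: struct rq_model, its fields and can_stop_tick() are made-up names that merely mirror the runqueue counters consulted by sched_can_stop_tick() in the diff further down; none of it is kernel API.

/*
 * Stand-alone model of the per-policy tick decision described in the
 * commit message.  Illustration only: the struct and helper names are
 * invented and simply mirror the rq counters used by the patch below.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq_model {
        unsigned int dl_nr_running;     /* SCHED_DEADLINE tasks */
        unsigned int rt_nr_running;     /* all RT tasks (FIFO + RR) */
        unsigned int rr_nr_running;     /* SCHED_RR subset of the above */
        unsigned int cfs_nr_running;    /* SCHED_NORMAL/SCHED_BATCH tasks */
};

static bool can_stop_tick(const struct rq_model *rq)
{
        /* SCHED_DEADLINE: always needs the tick for runtime accounting */
        if (rq->dl_nr_running)
                return false;

        /* SCHED_FIFO: no round-robin, so no tick needed */
        if (rq->rt_nr_running - rq->rr_nr_running)
                return true;

        /* SCHED_RR: tick needed once more than one RR task is queued */
        if (rq->rr_nr_running)
                return rq->rr_nr_running == 1;

        /* SCHED_NORMAL: tick needed for preemption with more than one task */
        return rq->cfs_nr_running <= 1;
}

int main(void)
{
        struct rq_model one_rr  = { .rt_nr_running = 1, .rr_nr_running = 1 };
        struct rq_model two_rr  = { .rt_nr_running = 2, .rr_nr_running = 2 };
        struct rq_model two_cfs = { .cfs_nr_running = 2 };

        printf("one RR task : %s\n", can_stop_tick(&one_rr)  ? "stop tick" : "keep tick");
        printf("two RR tasks: %s\n", can_stop_tick(&two_rr)  ? "stop tick" : "keep tick");
        printf("two CFS     : %s\n", can_stop_tick(&two_cfs) ? "stop tick" : "keep tick");
        return 0;
}

Running it prints that a lone SCHED_RR task lets the tick stop, while two SCHED_RR or two SCHED_NORMAL tasks keep it ticking, which is the behaviour the new sched_can_stop_tick() below encodes.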
@@ -2364,10 +2364,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
-extern bool sched_can_stop_tick(void);
 extern u64 scheduler_tick_max_deferment(void);
-#else
-static inline bool sched_can_stop_tick(void) { return false; }
-#endif
 
 #ifdef CONFIG_SCHED_AUTOGROUP
@@ -701,31 +701,36 @@ static inline bool got_nohz_idle_kick(void)
 #endif /* CONFIG_NO_HZ_COMMON */
 
 #ifdef CONFIG_NO_HZ_FULL
-bool sched_can_stop_tick(void)
+bool sched_can_stop_tick(struct rq *rq)
 {
+        int fifo_nr_running;
+
+        /* Deadline tasks, even if single, need the tick */
+        if (rq->dl.dl_nr_running)
+                return false;
+
         /*
-         * FIFO realtime policy runs the highest priority task. Other runnable
-         * tasks are of a lower priority. The scheduler tick does nothing.
+         * FIFO realtime policy runs the highest priority task (after DEADLINE).
+         * Other runnable tasks are of a lower priority. The scheduler tick
+         * isn't needed.
          */
-        if (current->policy == SCHED_FIFO)
+        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+        if (fifo_nr_running)
                 return true;
 
         /*
          * Round-robin realtime tasks time slice with other tasks at the same
-         * realtime priority. Is this task the only one at this priority?
+         * realtime priority.
          */
-        if (current->policy == SCHED_RR) {
-                struct sched_rt_entity *rt_se = &current->rt;
-
-                return list_is_singular(&rt_se->run_list);
+        if (rq->rt.rr_nr_running) {
+                if (rq->rt.rr_nr_running == 1)
+                        return true;
+                else
+                        return false;
         }
 
-        /*
-         * More than one running task need preemption.
-         * nr_running update is assumed to be visible
-         * after IPI is sent from wakers.
-         */
-        if (this_rq()->nr_running > 1)
+        /* Normal multitasking need periodic preemption checks */
+        if (rq->cfs.nr_running > 1)
                 return false;
 
         return true;
@@ -1279,6 +1279,35 @@ unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
 
+#ifdef CONFIG_NO_HZ_FULL
+extern bool sched_can_stop_tick(struct rq *rq);
+
+/*
+ * Tick may be needed by tasks in the runqueue depending on their policy and
+ * requirements. If tick is needed, lets send the target an IPI to kick it out of
+ * nohz mode if necessary.
+ */
+static inline void sched_update_tick_dependency(struct rq *rq)
+{
+        int cpu;
+
+        if (!tick_nohz_full_enabled())
+                return;
+
+        cpu = cpu_of(rq);
+
+        if (!tick_nohz_full_cpu(cpu))
+                return;
+
+        if (sched_can_stop_tick(rq))
+                tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+        else
+                tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+#else
+static inline void sched_update_tick_dependency(struct rq *rq) { }
+#endif
+
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
         unsigned prev_nr = rq->nr_running;
@@ -1290,26 +1319,16 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
                 if (!rq->rd->overload)
                         rq->rd->overload = true;
 #endif
-
-#ifdef CONFIG_NO_HZ_FULL
-                if (tick_nohz_full_cpu(rq->cpu)) {
-                        /*
-                         * Tick is needed if more than one task runs on a CPU.
-                         * Send the target an IPI to kick it out of nohz mode.
-                         *
-                         * We assume that IPI implies full memory barrier and the
-                         * new value of rq->nr_running is visible on reception
-                         * from the target.
-                         */
-                        tick_nohz_full_kick_cpu(rq->cpu);
-                }
-#endif
         }
+
+        sched_update_tick_dependency(rq);
 }
 
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
         rq->nr_running -= count;
+        /* Check if we still need preemption */
+        sched_update_tick_dependency(rq);
 }
 
 static inline void rq_last_tick_reset(struct rq *rq)
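For orientation on what the tick_nohz_dep_set_cpu()/tick_nohz_dep_clear_cpu() calls in sched_update_tick_dependency() above achieve: each nohz-full CPU carries a dependency bitmask, and its tick may only be stopped while that mask is empty. Below is a toy user-space model of that contract; apart from the TICK_DEP_BIT_SCHED name taken from the patch, the array, helper names and bit value are assumptions made up for illustration, not the kernel implementation.

/*
 * Toy model of a per-CPU tick dependency mask.  Only the name
 * TICK_DEP_BIT_SCHED comes from the patch above; the array, helpers,
 * bit value and main() are invented for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TICK_DEP_BIT_SCHED      2       /* bit position, illustrative */
#define NR_CPUS                 4

static atomic_uint tick_dep_mask[NR_CPUS];

static void dep_set_cpu(int cpu, int bit)
{
        /* The real helper would also kick the target CPU out of nohz mode. */
        atomic_fetch_or(&tick_dep_mask[cpu], 1u << bit);
}

static void dep_clear_cpu(int cpu, int bit)
{
        atomic_fetch_and(&tick_dep_mask[cpu], ~(1u << bit));
}

static bool tick_can_stop(int cpu)
{
        /* Tick may only be stopped while no dependency bit is set. */
        return atomic_load(&tick_dep_mask[cpu]) == 0;
}

int main(void)
{
        /* Second runnable task enqueued: scheduler sets its dependency. */
        dep_set_cpu(1, TICK_DEP_BIT_SCHED);
        printf("cpu1 can stop tick: %s\n", tick_can_stop(1) ? "yes" : "no");

        /* Back to a single runnable task: dependency cleared again. */
        dep_clear_cpu(1, TICK_DEP_BIT_SCHED);
        printf("cpu1 can stop tick: %s\n", tick_can_stop(1) ? "yes" : "no");
        return 0;
}

Because the scheduler now drives this bit itself from every enqueue/dequeue, can_stop_full_tick() no longer needs to poll sched_can_stop_tick(), which is what the final hunk below removes.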
@@ -204,11 +204,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
                 return false;
         }
 
-        if (!sched_can_stop_tick()) {
-                trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-                return false;
-        }
-
         if (!posix_cpu_timers_can_stop_tick(current)) {
                 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
                 return false;