sched: improve prev_sum_exec_runtime setting
Second preparatory patch for the ideal-runtime fix: mark prev_sum_exec_runtime at the beginning of our run, the same spot that adds our wait period to wait_runtime. This seems a more natural location to do this, and it also reduces the code a bit:

   text    data     bss     dec     hex filename
  13397     228    1204   14829    39ed sched.o.before
  13391     228    1204   14823    39e7 sched.o.after

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
7c92e54f6f
commit
4a55b45036
1 changed file with 2 additions and 3 deletions
|
@ -684,10 +684,8 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
|
||||||
* preempt the current task unless the best task has
|
* preempt the current task unless the best task has
|
||||||
* a larger than sched_granularity fairness advantage:
|
* a larger than sched_granularity fairness advantage:
|
||||||
*/
|
*/
|
||||||
if (__delta > niced_granularity(curr, granularity)) {
|
if (__delta > niced_granularity(curr, granularity))
|
||||||
resched_task(rq_of(cfs_rq)->curr);
|
resched_task(rq_of(cfs_rq)->curr);
|
||||||
curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
|
@ -703,6 +701,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||||
update_stats_wait_end(cfs_rq, se);
|
update_stats_wait_end(cfs_rq, se);
|
||||||
update_stats_curr_start(cfs_rq, se);
|
update_stats_curr_start(cfs_rq, se);
|
||||||
set_cfs_rq_curr(cfs_rq, se);
|
set_cfs_rq_curr(cfs_rq, se);
|
||||||
|
se->prev_sum_exec_runtime = se->sum_exec_runtime;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
|
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
|
||||||
|
|
Loading…
Reference in a new issue