Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	33
1 files changed, 0 insertions, 33 deletions

diff --git a/kernel/sched.c b/kernel/sched.c
index 35a8626ace7d..68ed6f4f3c13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1887,11 +1887,6 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep && p->se.last_wakeup) {
-		update_avg(&p->se.avg_overlap,
-			   p->se.sum_exec_runtime - p->se.last_wakeup);
-		p->se.last_wakeup = 0;
-	}
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
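For context: the hunk above removes the dequeue-side sampling that fed update_avg(). A minimal standalone sketch of how that running average worked, assuming the shift-by-3 (divide-by-8) decay used by the helper in kernel/sched.c of that era; the exact decay factor is an assumption here, not part of this diff:

#include <stdint.h>

/*
 * Running-average helper the removed hunk relied on: each new sample
 * pulls the average 1/8 of the way toward itself.
 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;	/* avg += (sample - avg) / 8 */
}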
@@ -2452,15 +2447,6 @@ out_activate:
 	activate_task(rq, p, 1);
 	success = 1;
 
-	/*
-	 * Only attribute actual wakeups done by this task.
-	 */
-	if (!in_interrupt()) {
-		struct sched_entity *se = &current->se;
-
-		se->last_wakeup = se->sum_exec_runtime;
-	}
-
out_running:
 	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, wake_flags);
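The two hunks so far are the two halves of the old avg_overlap sampling: at wakeup the waker (current, and only when not in interrupt context) stamps its own sum_exec_runtime into last_wakeup; when that waker later dequeues to sleep, the delta, i.e. how long it kept running after issuing the wakeup, is folded into avg_overlap. An illustrative sketch of that pairing, reusing the update_avg() sketch above; struct ent and the function names are hypothetical stand-ins for the sched_entity fields named in the diff:

/* Fields mirrored from struct sched_entity as used by the removed code. */
struct ent {
	uint64_t sum_exec_runtime;
	uint64_t prev_sum_exec_runtime;
	uint64_t last_wakeup;
	uint64_t avg_overlap;
};

/* try_to_wake_up() side: remember when the waker issued the wakeup. */
static void stamp_wakeup(struct ent *waker)
{
	waker->last_wakeup = waker->sum_exec_runtime;
}

/* dequeue_task() side: runtime since the stamp is the waker/wakee overlap. */
static void sample_overlap(struct ent *waker)
{
	if (waker->last_wakeup) {
		update_avg(&waker->avg_overlap,
			   waker->sum_exec_runtime - waker->last_wakeup);
		waker->last_wakeup = 0;
	}
}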
@@ -2522,8 +2508,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
-	p->se.last_wakeup = 0;
-	p->se.avg_overlap = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -3594,23 +3578,6 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
-
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
-		/*
-		 * In order to avoid avg_overlap growing stale when we are
-		 * indeed overlapping and hence not getting put to sleep, grow
-		 * the avg_overlap on preemption.
-		 *
-		 * We use the average preemption runtime because that
-		 * correlates to the amount of cache footprint a task can
-		 * build up.
-		 */
-		update_avg(&prev->se.avg_overlap, runtime);
-	}
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
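The last hunk drops the preemption-side update: when a task is switched out while still runnable (prev->state == TASK_RUNNING), the slice it ran since the previous preemption, clamped to twice sysctl_sched_migration_cost, was also folded into avg_overlap so the average would not grow stale for tasks that genuinely overlap and never sleep. A sketch of that clamp-and-fold step, reusing struct ent and update_avg() from the sketches above; the 500000 ns value is only the usual default for sysctl_sched_migration_cost, stated here as an assumption:

static uint64_t sysctl_sched_migration_cost = 500000ULL;	/* ~0.5 ms, assumed default */

/* put_prev_task() side: grow avg_overlap on involuntary preemption. */
static void grow_overlap_on_preemption(struct ent *prev, int still_runnable)
{
	if (still_runnable) {	/* prev->state == TASK_RUNNING in the diff */
		uint64_t runtime = prev->sum_exec_runtime - prev->prev_sum_exec_runtime;

		if (runtime > 2 * sysctl_sched_migration_cost)	/* the min_t() clamp */
			runtime = 2 * sysctl_sched_migration_cost;
		update_avg(&prev->avg_overlap, runtime);
	}
}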