path: root/kernel/sched
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  21  ++++++++++++++-------
1 file changed, 14 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e54231fc6336..7f031e454740 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2484,6 +2484,8 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -2512,7 +2514,7 @@ static u32 __compute_runnable_contrib(u64 n)
  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int __update_entity_runnable_avg(u64 now,
+static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 							struct sched_avg *sa,
 							int runnable,
 							int running)
@@ -2520,6 +2522,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	u64 delta, periods;
 	u32 runnable_contrib;
 	int delta_w, decayed = 0;
+	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
 	delta = now - sa->last_runnable_update;
 	/*
@@ -2555,7 +2558,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += delta_w;
 		if (running)
-			sa->running_avg_sum += delta_w;
+			sa->running_avg_sum += delta_w * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += delta_w;
 
 		delta -= delta_w;
@@ -2576,7 +2580,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 		if (runnable)
 			sa->runnable_avg_sum += runnable_contrib;
 		if (running)
-			sa->running_avg_sum += runnable_contrib;
+			sa->running_avg_sum += runnable_contrib * scale_freq
+				>> SCHED_CAPACITY_SHIFT;
 		sa->avg_period += runnable_contrib;
 	}
 
@@ -2584,7 +2589,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	if (runnable)
 		sa->runnable_avg_sum += delta;
 	if (running)
-		sa->running_avg_sum += delta;
+		sa->running_avg_sum += delta * scale_freq
+			>> SCHED_CAPACITY_SHIFT;
 	sa->avg_period += delta;
 
 	return decayed;
@@ -2692,8 +2698,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable,
-			runnable);
+	__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
+			runnable, runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -2771,6 +2777,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	long contrib_delta, utilization_delta;
+	int cpu = cpu_of(rq_of(cfs_rq));
 	u64 now;
 
 	/*
@@ -2782,7 +2789,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 	else
 		now = cfs_rq_clock_task(group_cfs_rq(se));
 
-	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+	if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
 					  cfs_rq->curr == se))
 		return;
 
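
The arithmetic the hunks above apply to running_avg_sum can be checked in isolation. The standalone sketch below is not kernel code: the scale_freq_capacity() helper and the 600 MHz / 1.2 GHz figures are made up for illustration, and only the final expression mirrors the patched `contrib * scale_freq >> SCHED_CAPACITY_SHIFT` update. The idea is that a CPU running at half its maximum frequency accrues half the running contribution.

/*
 * Minimal userspace sketch of the frequency scaling applied to
 * running_avg_sum in the patch above. Names and figures outside the
 * final expression are illustrative assumptions, not kernel API.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */

/*
 * Hypothetical stand-in for arch_scale_freq_capacity(): current
 * frequency as a fraction of the maximum, expressed against
 * SCHED_CAPACITY_SCALE (1024 == running at full speed).
 */
static unsigned long scale_freq_capacity(unsigned long cur_khz,
					 unsigned long max_khz)
{
	return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
}

int main(void)
{
	unsigned long delta = 1000;	/* accrued running time, e.g. us */
	/* CPU at 600 MHz out of a 1.2 GHz maximum -> scale_freq = 512 */
	unsigned long scale_freq = scale_freq_capacity(600000, 1200000);

	/* Same expression as the patched running_avg_sum updates. */
	unsigned long scaled = delta * scale_freq >> SCHED_CAPACITY_SHIFT;

	printf("scale_freq=%lu scaled=%lu\n", scale_freq, scaled);	/* 512, 500 */
	return 0;
}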