 kernel/sched/fair.c |  5
 kernel/sched/proc.c | 17
 2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9bbc303598ea..e6d82cae4910 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2963,7 +2963,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
-	return cpu_rq(cpu)->load.weight;
+	return cpu_rq(cpu)->cfs.runnable_load_avg;
 }
 
 /*
@@ -3008,9 +3008,10 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+	unsigned long load_avg = rq->cfs.runnable_load_avg;
 
 	if (nr_running)
-		return rq->load.weight / nr_running;
+		return load_avg / nr_running;
 
 	return 0;
 }
diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
index bb3a6a0b8623..ce5cd4892e43 100644
--- a/kernel/sched/proc.c
+++ b/kernel/sched/proc.c
@@ -501,6 +501,18 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_SMP
+unsigned long get_rq_runnable_load(struct rq *rq)
+{
+	return rq->cfs.runnable_load_avg;
+}
+#else
+unsigned long get_rq_runnable_load(struct rq *rq)
+{
+	return rq->load.weight;
+}
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
@@ -522,7 +534,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 void update_idle_cpu_load(struct rq *this_rq)
 {
 	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-	unsigned long load = this_rq->load.weight;
+	unsigned long load = get_rq_runnable_load(this_rq);
 	unsigned long pending_updates;
 
 	/*
@@ -568,11 +580,12 @@ void update_cpu_load_nohz(void)
  */
 void update_cpu_load_active(struct rq *this_rq)
 {
+	unsigned long load = get_rq_runnable_load(this_rq);
 	/*
 	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
 	 */
 	this_rq->last_load_update_tick = jiffies;
-	__update_cpu_load(this_rq, this_rq->load.weight, 1);
+	__update_cpu_load(this_rq, load, 1);
 
 	calc_load_account_active(this_rq);
 }
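
For readers following the change outside the kernel tree, below is a small, self-contained C sketch of the pattern the diff introduces: the CPU-load helpers stop reading the instantaneous rq->load.weight and instead consume a decayed runnable load average when SMP is configured, falling back to load.weight on UP builds. This is a hypothetical illustration only; toy_rq, toy_rq_runnable_load, toy_avg_load_per_task, and all numbers are invented for the example and are not kernel APIs.

/*
 * Standalone illustration (not kernel code) of the helper pattern added
 * in kernel/sched/proc.c and the divisor change in cpu_avg_load_per_task().
 * Struct layout and values are made up for demonstration.
 */
#include <stdio.h>

#define CONFIG_SMP 1	/* flip to 0 to model the UP fallback path */

struct toy_rq {
	unsigned long load_weight;		/* instantaneous sum of task weights */
	unsigned long runnable_load_avg;	/* decayed, time-averaged runnable load */
	unsigned long nr_running;
};

/* Mirrors get_rq_runnable_load(): averaged load on SMP, raw weight on UP. */
static unsigned long toy_rq_runnable_load(const struct toy_rq *rq)
{
#if CONFIG_SMP
	return rq->runnable_load_avg;
#else
	return rq->load_weight;
#endif
}

/* Mirrors cpu_avg_load_per_task() after the patch: divide the averaged
 * load, rather than the instantaneous weight, by the number of runnable
 * tasks. */
static unsigned long toy_avg_load_per_task(const struct toy_rq *rq)
{
	if (rq->nr_running)
		return rq->runnable_load_avg / rq->nr_running;
	return 0;
}

int main(void)
{
	/* A CPU whose tasks run in short bursts: the averaged value sits well
	 * below the instantaneous weight, so balancing sees a calmer input. */
	struct toy_rq rq = {
		.load_weight = 2048,
		.runnable_load_avg = 731,
		.nr_running = 2,
	};

	printf("load fed to balancing: %lu (instantaneous weight: %lu)\n",
	       toy_rq_runnable_load(&rq), rq.load_weight);
	printf("avg load per task: %lu\n", toy_avg_load_per_task(&rq));
	return 0;
}

As the diff itself shows, the effect of the switch is that weighted_cpuload(), cpu_avg_load_per_task(), and the periodic cpu-load updates all track the smoothed runnable average rather than whatever happened to be queued at the sampling instant.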