author		Daniel Lezcano <daniel.lezcano@linaro.org>	2018-10-04 08:04:02 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-10-25 10:49:26 -0400
commit		145d952a29320dea883246bcb24ba1da7ac4bb7f (patch)
tree		b921a75f5e0cdd2fcd52925cce9dc5825157ea2b
parent		bd6bf7c10484f026505814b690104cdef27ed460 (diff)
sched: Factor out nr_iowait and nr_iowait_cpu
The function nr_iowait_cpu() can be used directly by nr_iowait() instead
of duplicating code.

Call nr_iowait_cpu() from nr_iowait().

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--	kernel/sched/core.c	41
1 file changed, 20 insertions(+), 21 deletions(-)
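The refactor itself is small: nr_iowait() used to re-read each runqueue's
nr_iowait counter directly, and after the patch it sums the per-CPU helper
nr_iowait_cpu() instead, so the counter is read in exactly one place. Below
is a minimal userspace sketch of the same pattern; it assumes a plain atomic
array in place of the kernel's per-CPU runqueues, and NR_CPUS,
nr_iowait_counts and main() are illustrative stand-ins, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the kernel's per-runqueue ->nr_iowait counters. */
static atomic_ulong nr_iowait_counts[NR_CPUS];

/* Per-CPU accessor, mirroring the patch's nr_iowait_cpu(). */
static unsigned long nr_iowait_cpu(int cpu)
{
	return atomic_load(&nr_iowait_counts[cpu]);
}

/*
 * System-wide total, built on the accessor rather than re-reading the
 * counter directly -- the deduplication this commit performs.
 */
static unsigned long nr_iowait(void)
{
	unsigned long sum = 0;

	for (int i = 0; i < NR_CPUS; i++)
		sum += nr_iowait_cpu(i);

	return sum;
}

int main(void)
{
	atomic_store(&nr_iowait_counts[1], 2);
	atomic_store(&nr_iowait_counts[3], 1);
	printf("total tasks in iowait: %lu\n", nr_iowait());
	return 0;
}

Routing the system-wide sum through the per-CPU accessor means a later change
to how the counter is stored only has to touch one function, which is the
point of the deduplication.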
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe0223121883..9245c56b8f5f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2876,6 +2876,25 @@ unsigned long long nr_context_switches(void)
 }
 
 /*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data. Preferring shallow idle state selection
+ * for a CPU that has IO-wait which might not even end up running the task when
+ * it does become runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
+{
+	struct rq *rq = this_rq();
+	*nr_waiters = atomic_read(&rq->nr_iowait);
+	*load = rq->load.weight;
+}
+
+/*
  * IO-wait accounting, and how its mostly bollocks (on SMP).
  *
  * The idea behind IO-wait account is to account the idle time that we could
@@ -2910,31 +2929,11 @@ unsigned long nr_iowait(void)
 	unsigned long i, sum = 0;
 
 	for_each_possible_cpu(i)
-		sum += atomic_read(&cpu_rq(i)->nr_iowait);
+		sum += nr_iowait_cpu(i);
 
 	return sum;
 }
 
-/*
- * Consumers of these two interfaces, like for example the cpuidle menu
- * governor, are using nonsensical data. Preferring shallow idle state selection
- * for a CPU that has IO-wait which might not even end up running the task when
- * it does become runnable.
- */
-
-unsigned long nr_iowait_cpu(int cpu)
-{
-	struct rq *this = cpu_rq(cpu);
-	return atomic_read(&this->nr_iowait);
-}
-
-void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-{
-	struct rq *rq = this_rq();
-	*nr_waiters = atomic_read(&rq->nr_iowait);
-	*load = rq->load.weight;
-}
-
 #ifdef CONFIG_SMP
 
 /*
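As background for the comment the patch moves: the cpuidle menu governor
consumes these interfaces (via get_iowait_load()) to bias idle-state
selection toward shallower states when a CPU has tasks in IO-wait, on the
theory that a wakeup is imminent. A hypothetical model of that decision is
sketched below; pick_idle_state() and its enum are invented for illustration
and are not the governor's actual code, which lives in
drivers/cpuidle/governors/menu.c:

#include <stdio.h>

enum idle_state { STATE_SHALLOW, STATE_DEEP };

/*
 * Invented for illustration: an IO-waiter on this CPU hints at an
 * imminent wakeup, so prefer the shallow (fast-exit) idle state.
 */
static enum idle_state pick_idle_state(unsigned long nr_iowaiters)
{
	return nr_iowaiters ? STATE_SHALLOW : STATE_DEEP;
}

int main(void)
{
	printf("0 waiters -> %s\n",
	       pick_idle_state(0) == STATE_DEEP ? "deep" : "shallow");
	printf("3 waiters -> %s\n",
	       pick_idle_state(3) == STATE_DEEP ? "deep" : "shallow");
	return 0;
}

The moved comment calls this data "nonsensical" because the waiting task is
not guaranteed to run on that CPU when it becomes runnable, so the per-CPU
count is at best a heuristic.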