author		Peter Zijlstra <peterz@infradead.org>	2010-07-01 03:07:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-07-01 03:39:48 -0400
commit		8c215bd3890c347dfb6a2db4779755f8b9c298a9 (patch)
tree		e6bd5de8a028babe9ec75f744977bd1424df106c
parent		9715856922bf8475f5428c29b6f4a9eebc97d391 (diff)
sched: Cure nr_iowait_cpu() users
Commit 0224cf4c5e (sched: Intoduce get_cpu_iowait_time_us()) broke
things by not making sure preemption was disabled in the callers of
nr_iowait_cpu(), which reads the iowait count of the current CPU.
This resulted in a heap of preempt warnings. Cure this by making
nr_iowait_cpu() take a CPU number and fixing up the callers to pass
in the right one.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: linux-pm@lists.linux-foundation.org
LKML-Reference: <1277968037.1868.120.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
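The API change can be illustrated with a minimal caller sketch (not part of
this patch; sample_iowait_here() is a made-up helper). With the new signature
the caller names the CPU explicitly, so the read no longer relies on an
implicit this_rq() access that required preemption to already be disabled. A
caller that may run preemptible can pin itself while deriving the CPU number:

#include <linux/sched.h>	/* nr_iowait_cpu() */
#include <linux/smp.h>		/* get_cpu()/put_cpu() */

/* Illustrative only -- not from this patch. */
static unsigned long sample_iowait_here(void)
{
	int cpu = get_cpu();			/* disables preemption, returns this CPU */
	unsigned long nr = nr_iowait_cpu(cpu);	/* read that CPU's nr_iowait counter */

	put_cpu();				/* re-enable preemption */
	return nr;
}

The callers touched below do not need this dance: the cpuidle menu governor
and the tick idle-entry/exit code run on the CPU they are accounting for, so
passing smp_processor_id() (or the cpu argument they already carry) is enough.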
-rw-r--r--	drivers/cpuidle/governors/menu.c	|  4
-rw-r--r--	include/linux/sched.h			|  2
-rw-r--r--	kernel/sched.c				|  4
-rw-r--r--	kernel/time/tick-sched.c		| 16
4 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52ff8aa63f84..1b128702d300 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
 	 * This allows us to calculate
 	 * E(duration)|iowait
 	 */
-	if (nr_iowait_cpu())
+	if (nr_iowait_cpu(smp_processor_id()))
 		bucket = BUCKETS/2;
 
 	if (duration < 10)
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
 	mult += 2 * get_loadavg();
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
-	mult += 10 * nr_iowait_cpu();
+	mult += 10 * nr_iowait_cpu(smp_processor_id());
 
 	return mult;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f118809c953f..747fcaedddb7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(void);
+extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
diff --git a/kernel/sched.c b/kernel/sched.c
index a24d6d5d83f6..f87abe3b0176 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2864,9 +2864,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 	return atomic_read(&this->nr_iowait);
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c034..1a6f828e57a0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates