author     Peter Zijlstra <peterz@infradead.org>    2010-07-01 03:07:17 -0400
committer  Ingo Molnar <mingo@elte.hu>              2010-07-01 03:39:48 -0400
commit     8c215bd3890c347dfb6a2db4779755f8b9c298a9 (patch)
tree       e6bd5de8a028babe9ec75f744977bd1424df106c /kernel
parent     9715856922bf8475f5428c29b6f4a9eebc97d391 (diff)
sched: Cure nr_iowait_cpu() users
Commit 0224cf4c5e (sched: Intoduce get_cpu_iowait_time_us()) broke things by not
making sure preemption was indeed disabled by the callers of nr_iowait_cpu(),
which took the iowait value of the current cpu. This resulted in a heap of
preempt warnings. Cure this by making nr_iowait_cpu() take a cpu number and fix
up the callers to pass in the right number.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: linux-pm@lists.linux-foundation.org
LKML-Reference: <1277968037.1868.120.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
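The core of the change, sketched with a hypothetical caller (sample_cpu_iowait
is illustrative only, not part of the patch): under the old void signature a
preemptible caller that wanted "the current CPU" had to bracket the call with
preempt_disable()/preempt_enable(); with the cpu argument the caller names the
runqueue explicitly, so the read is safe from preemptible context.

/* Hypothetical caller, for illustration only -- not part of this patch. */
static unsigned long sample_cpu_iowait(int cpu)
{
        /*
         * Old interface (implicit this_rq()), used safely, would have needed:
         *
         *      preempt_disable();
         *      nr = nr_iowait_cpu();
         *      preempt_enable();
         *
         * New interface: the CPU is named explicitly, so no preemption
         * bracketing is required just to read the per-CPU counter.
         */
        return nr_iowait_cpu(cpu);
}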
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c            |  4 ++--
-rw-r--r--  kernel/time/tick-sched.c  | 16 ++++++++--------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a24d6d5d83f6..f87abe3b0176 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2864,9 +2864,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 	return atomic_read(&this->nr_iowait);
 }
 
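For context, the two runqueue accessors the hunk above swaps between look
roughly like the sketch below (simplified from the kernel/sched.c of this era,
surrounding declarations elided): this_rq() resolves the runqueue of whichever
CPU the caller happens to be running on, which is only meaningful with
preemption disabled, while cpu_rq(cpu) addresses a specific CPU's runqueue and
carries no such requirement.

/* Simplified sketch of the accessors, not a verbatim copy. */
DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))	/* explicit CPU */
#define this_rq()	(&__get_cpu_var(runqueues))	/* current CPU only */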
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c034..1a6f828e57a0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
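The exported helpers touched above keep their explicit-cpu signatures, so a
consumer can sample any CPU's idle/iowait accounting without preemption games.
A hypothetical consumer (report_cpu_times is illustrative, not from the tree)
might look like:

#include <linux/kernel.h>
#include <linux/tick.h>

/* Hypothetical consumer, for illustration only. */
static void report_cpu_times(int cpu)
{
	u64 last_update;
	u64 idle_us   = get_cpu_idle_time_us(cpu, &last_update);
	u64 iowait_us = get_cpu_iowait_time_us(cpu, &last_update);

	pr_info("cpu%d: idle %llu us, iowait %llu us\n", cpu,
		(unsigned long long)idle_us,
		(unsigned long long)iowait_us);
}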