author     Ingo Molnar <mingo@elte.hu>   2009-09-07 02:19:51 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-09-07 02:19:51 -0400
commit     a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree       0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/time
parent     75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parent     d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
	arch/Kconfig
	kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/time')
 -rw-r--r--  kernel/time/clockevents.c    | 27
 -rw-r--r--  kernel/time/clocksource.c    |  2
 -rw-r--r--  kernel/time/tick-broadcast.c |  7
 -rw-r--r--  kernel/time/tick-sched.c     | 12
 -rw-r--r--  kernel/time/timer_list.c     |  2
 -rw-r--r--  kernel/time/timer_stats.c    | 16
 6 files changed, 32 insertions(+), 34 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ad6dd461119..620b58abdc32 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int clockevents_register_notifier(struct notifier_block *nb)
 {
+	unsigned long flags;
 	int ret;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	ret = raw_notifier_chain_register(&clockevents_chain, nb);
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 
 	return ret;
 }
@@ -178,16 +179,18 @@ static void clockevents_notify_released(void)
  */
 void clockevents_register_device(struct clock_event_device *dev)
 {
+	unsigned long flags;
+
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 	BUG_ON(!dev->cpumask);
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 
 	list_add(&dev->list, &clockevent_devices);
 	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
 	clockevents_notify_released();
 
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
@@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
 void clockevents_notify(unsigned long reason, void *arg)
 {
 	struct list_head *node, *tmp;
+	unsigned long flags;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
 
 	switch (reason) {
@@ -251,18 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 	default:
 		break;
 	}
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
-
-ktime_t clockevents_get_next_event(int cpu)
-{
-	struct tick_device *td;
-	struct clock_event_device *dev;
-
-	td = &per_cpu(tick_cpu_device, cpu);
-	dev = td->evtdev;
-
-	return dev->next_event;
-}
 #endif
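
Each hunk above makes the same locking conversion (the last one also removes clockevents_get_next_event()): clockevents_lock is now taken with spin_lock_irqsave()/spin_unlock_irqrestore() instead of plain spin_lock()/spin_unlock(), so the caller's interrupt state is saved in a local flags word and local interrupts are disabled while the lock is held. Below is a minimal sketch of that locking pattern, not the actual clockevents code; the lock name and the critical section are illustrative stand-ins.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* stand-in for clockevents_lock */

static int example_register(void)
{
	unsigned long flags;
	int ret;

	/*
	 * spin_lock_irqsave() saves the current interrupt state in 'flags'
	 * and disables local interrupts before taking the lock, so the same
	 * lock can also be taken safely from contexts that already run with
	 * interrupts disabled or from interrupt context.
	 */
	spin_lock_irqsave(&example_lock, flags);
	ret = 0;				/* critical section goes here */
	spin_unlock_irqrestore(&example_lock, flags);

	return ret;
}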
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 592bf584d1d2..7466cb811251 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 	 * Check to make sure we don't switch to a non-highres capable
 	 * clocksource if the tick code is in oneshot mode (highres or nohz)
 	 */
-	if (tick_oneshot_mode_active() &&
+	if (tick_oneshot_mode_active() && ovr &&
 	    !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
 		printk(KERN_WARNING "%s clocksource is not HRT compatible. "
 			"Cannot switch while in HRT/NOHZ mode\n", ovr->name);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 877dbedc3118..c2ec25087a35 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
  */
-static void tick_do_broadcast_on_off(void *why)
+static void tick_do_broadcast_on_off(unsigned long *reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
-	unsigned long flags, *reason = why;
+	unsigned long flags;
 	int cpu, bc_stopped;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
 		       "offline CPU #%d\n", *oncpu);
 	else
-		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-					 &reason, 1);
+		tick_do_broadcast_on_off(&reason);
 }
 
 /*
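
The second hunk replaces the smp_call_function_single() IPI with a direct call, and that is what allows the first hunk to give tick_do_broadcast_on_off() a typed unsigned long * parameter: a handler passed to smp_call_function_single() has to match the generic void (*)(void *) callback signature and cast its argument back itself. A small sketch of the two calling conventions, with made-up handler names; only smp_call_function_single() and the &reason argument come from the hunks above.

#include <linux/smp.h>

/* As an smp_call_function_single() callback, the handler must take void *. */
static void broadcast_handler_ipi(void *info)
{
	unsigned long *reason = info;	/* cast back to the real type */

	(void)*reason;			/* act on *reason here */
}

/* Called directly on the local CPU, it can take the real type. */
static void broadcast_handler_direct(unsigned long *reason)
{
	(void)*reason;			/* act on *reason here */
}

static void example(int cpu, unsigned long reason)
{
	/* run the handler on another CPU and wait for it to finish */
	smp_call_function_single(cpu, broadcast_handler_ipi, &reason, 1);

	/* or call it directly, as the patched code now does */
	broadcast_handler_direct(&reason);
}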
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2aff39c6f10c..e0f59a21c061 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -222,6 +222,15 @@ void tick_nohz_stop_sched_tick(int inidle)
 
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
+
+	/*
+	 * Call to tick_nohz_start_idle stops the last_update_time from being
+	 * updated. Thus, it must not be called in the event we are called from
+	 * irq_exit() with the prior state different than idle.
+	 */
+	if (!inidle && !ts->inidle)
+		goto end;
+
 	now = tick_nohz_start_idle(ts);
 
 	/*
@@ -239,9 +248,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 		goto end;
 
-	if (!inidle && !ts->inidle)
-		goto end;
-
 	ts->inidle = 1;
 
 	if (need_resched())
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a999b92a1277..fddd69d16e03 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -286,7 +286,7 @@ static int __init init_timer_list_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
+	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c994530d166d..4cde8b9c716f 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex);
 /*
  * Collection status, active/inactive:
  */
-static int __read_mostly active;
+int __read_mostly timer_stats_active;
 
 /*
  * Beginning/end timestamps of measurement:
@@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 	struct entry *entry, input;
 	unsigned long flags;
 
-	if (likely(!active))
+	if (likely(!timer_stats_active))
 		return;
 
 	lock = &per_cpu(lookup_lock, raw_smp_processor_id());
@@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 	input.timer_flag = timer_flag;
 
 	spin_lock_irqsave(lock, flags);
-	if (!active)
+	if (!timer_stats_active)
 		goto out_unlock;
 
 	entry = tstat_lookup(&input, comm);
@@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v)
 	/*
 	 * If still active then calculate up to now:
 	 */
-	if (active)
+	if (timer_stats_active)
 		time_stop = ktime_get();
 
 	time = ktime_sub(time_stop, time_start);
@@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
 	mutex_lock(&show_mutex);
 	switch (ctl[0]) {
 	case '0':
-		if (active) {
-			active = 0;
+		if (timer_stats_active) {
+			timer_stats_active = 0;
 			time_stop = ktime_get();
 			sync_access();
 		}
 		break;
 	case '1':
-		if (!active) {
+		if (!timer_stats_active) {
 			reset_entries();
 			time_start = ktime_get();
 			smp_mb();
-			active = 1;
+			timer_stats_active = 1;
 		}
 		break;
 	default:
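
The last diff only renames the flag, but dropping static makes timer_stats_active visible outside timer_stats.c, presumably together with an extern declaration in a header that is not part of this diff. Below is a hypothetical sketch of how a caller elsewhere could then do a cheap inline check before paying for the real accounting call; the header-style snippet and the helper name are illustrative, only timer_stats_active itself comes from the hunks above.

#include <linux/compiler.h>	/* likely() */
#include <linux/timer.h>

/* Presumably declared in a shared header once the flag is non-static: */
extern int timer_stats_active;

/* Illustrative stand-in for the real collection routine. */
void __example_record_timer_stats(struct timer_list *timer);

static inline void example_timer_stats_account(struct timer_list *timer)
{
	/* cheap inline test: skip the function call when collection is off */
	if (likely(!timer_stats_active))
		return;

	__example_record_timer_stats(timer);
}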