author		Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 11:02:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 11:02:40 -0500
commit		423d091dfe58d3109d84c408810a7cfa82f6f184 (patch)
tree		43c4385d1dc7219582f924d42db1f3e203a577bd /kernel/time
parent		1483b3823542c9721eddf09a077af1e02ac96b50 (diff)
parent		919b83452b2e7c1dbced0456015508b4b9585db3 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (64 commits)
cpu: Export cpu_up()
rcu: Apply ACCESS_ONCE() to rcu_boost() return value
Revert "rcu: Permit rt_mutex_unlock() with irqs disabled"
docs: Additional LWN links to RCU API
rcu: Augment rcu_batch_end tracing for idle and callback state
rcu: Add rcutorture tests for srcu_read_lock_raw()
rcu: Make rcutorture test for hotpluggability before offlining CPUs
driver-core/cpu: Expose hotpluggability to the rest of the kernel
rcu: Remove redundant rcu_cpu_stall_suppress declaration
rcu: Adaptive dyntick-idle preparation
rcu: Keep invoking callbacks if CPU otherwise idle
rcu: Irq nesting is always 0 on rcu_enter_idle_common
rcu: Don't check irq nesting from rcu idle entry/exit
rcu: Permit dyntick-idle with callbacks pending
rcu: Document same-context read-side constraints
rcu: Identify dyntick-idle CPUs on first force_quiescent_state() pass
rcu: Remove dynticks false positives and RCU failures
rcu: Reduce latency of rcu_prepare_for_idle()
rcu: Eliminate RCU_FAST_NO_HZ grace-period hang
rcu: Avoid needlessly IPIing CPUs at GP end
...
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/tick-sched.c | 97
1 file changed, 60 insertions(+), 37 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 40420644d0ba..0ec8b832ab6b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -275,42 +275,17 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
-	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
-	struct tick_sched *ts;
+	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 	int cpu;
 
-	local_irq_save(flags);
-
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
-	/*
-	 * Call to tick_nohz_start_idle stops the last_update_time from being
-	 * updated. Thus, it must not be called in the event we are called from
-	 * irq_exit() with the prior state different than idle.
-	 */
-	if (!inidle && !ts->inidle)
-		goto end;
-
-	/*
-	 * Set ts->inidle unconditionally. Even if the system did not
-	 * switch to NOHZ mode the cpu frequency governers rely on the
-	 * update of the idle time accounting in tick_nohz_start_idle().
-	 */
-	ts->inidle = 1;
-
 	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
@@ -326,10 +301,10 @@ void tick_nohz_stop_sched_tick(int inidle)
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-		goto end;
+		return;
 
 	if (need_resched())
-		goto end;
+		return;
 
 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 		static int ratelimit;
@@ -339,7 +314,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			       (unsigned int) local_softirq_pending());
 			ratelimit++;
 		}
-		goto end;
+		return;
 	}
 
 	ts->idle_calls++;
@@ -434,7 +409,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
-			rcu_enter_nohz();
 		}
 
 		ts->idle_sleeps++;
@@ -472,8 +446,56 @@ out:
 	ts->next_jiffies = next_jiffies;
 	ts->last_jiffies = last_jiffies;
 	ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
-	local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible of calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ *  to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+	struct tick_sched *ts;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	local_irq_disable();
+
+	ts = &__get_cpu_var(tick_cpu_sched);
+	/*
+	 * set ts->inidle unconditionally. even if the system did not
+	 * switch to nohz mode the cpu frequency governers rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
+	ts->inidle = 1;
+	tick_nohz_stop_sched_tick(ts);
+
+	local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+	if (!ts->inidle)
+		return;
+
+	tick_nohz_stop_sched_tick(ts);
 }
 
 /**
@@ -515,11 +537,13 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 }
 
 /**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
  * Restart the idle tick when the CPU is woken up from idle
+ * This also exit the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
  */
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -529,6 +553,7 @@ void tick_nohz_restart_sched_tick(void)
 	ktime_t now;
 
 	local_irq_disable();
+
 	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
 		now = ktime_get();
 
@@ -543,8 +568,6 @@ void tick_nohz_restart_sched_tick(void)
 
 	ts->inidle = 0;
 
-	rcu_exit_nohz();
-
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
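
Usage note (not part of the commit): the kernel-doc above splits the old tick_nohz_stop_sched_tick(inidle) entry point into tick_nohz_idle_enter()/tick_nohz_idle_exit() for the idle loop and tick_nohz_irq_exit() for interrupt return, and leaves the RCU extended-quiescent-state transitions to the architecture. The sketch below only illustrates the ordering those comments describe; example_cpu_idle_loop() and arch_cpu_sleep() are placeholder names, not functions from this patch, and details such as preemption handling are omitted.

#include <linux/tick.h>		/* tick_nohz_idle_enter(), tick_nohz_idle_exit() */
#include <linux/rcupdate.h>	/* rcu_idle_enter(), rcu_idle_exit() */
#include <linux/sched.h>	/* need_resched(), schedule() */

/* Placeholder for the arch's low-level sleep primitive (hlt, wfi, mwait, ...). */
static void arch_cpu_sleep(void)
{
	/* Illustrative stub: the real arch code would halt the CPU here. */
}

/* Illustrative only: a simplified arch idle loop using the new API. */
static void example_cpu_idle_loop(void)
{
	while (1) {
		/* May stop the periodic tick; RCU is still usable here. */
		tick_nohz_idle_enter();

		while (!need_resched()) {
			/*
			 * Enter the RCU extended quiescent state only after
			 * the last read-side use of RCU on this CPU.
			 */
			rcu_idle_enter();
			arch_cpu_sleep();
			/* RCU may be used again from this point on. */
			rcu_idle_exit();
		}

		/* Restart the tick before going back to the scheduler. */
		tick_nohz_idle_exit();
		schedule();
	}
}

An interrupt that wakes the CPU without forcing a reschedule returns through irq_exit(), which calls tick_nohz_irq_exit() to recompute and reprogram the next tick event, since the handler may have armed a timer or queued an RCU callback.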