Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/tick-broadcast.c   24
-rw-r--r--   kernel/time/tick-sched.c       12
-rw-r--r--   kernel/time/timekeeping.c      10
3 files changed, 37 insertions, 9 deletions
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index db8e0f3d40..aab881c86a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -382,12 +382,23 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * If the CPU is marked for broadcast, enforce oneshot
+	 * broadcast mode. The jinxed VAIO does not resume otherwise.
+	 * No idea why it ends up in a lower C State during resume
+	 * without notifying the clock events layer.
+	 */
+	if (cpu_isset(cpu, tick_broadcast_mask))
+		cpu_set(cpu, tick_broadcast_oneshot_mask);
+
 	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 	if(!cpus_empty(tick_broadcast_oneshot_mask))
 		tick_broadcast_set_event(ktime_get(), 1);
 
-	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+	return cpu_isset(cpu, tick_broadcast_oneshot_mask);
 }
 
 /*
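The resume hunk above reduces to two cpumask operations: force a broadcast-marked CPU into the oneshot mask, then report whether the CPU still depends on the broadcast device. A minimal userspace sketch of that decision, modeling the cpumasks as plain 64-bit bitmasks; broadcast_mask, oneshot_mask and resume_oneshot are illustrative stand-ins, not the kernel API:

/*
 * Sketch of the resume re-arm decision, with cpumasks modeled as
 * 64-bit masks. Hypothetical names; not the kernel's interfaces.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t broadcast_mask;   /* CPUs relying on the broadcast device */
static uint64_t oneshot_mask;     /* CPUs waiting for a oneshot broadcast */

/* Returns nonzero when broadcast handling must stay armed for this cpu. */
static int resume_oneshot(int cpu)
{
	/*
	 * A CPU marked for broadcast is forced into the oneshot mask,
	 * otherwise no event gets programmed for it after resume (the
	 * "jinxed VAIO" case described in the comment above).
	 */
	if (broadcast_mask & (1ULL << cpu))
		oneshot_mask |= 1ULL << cpu;

	return (oneshot_mask & (1ULL << cpu)) != 0;
}

int main(void)
{
	broadcast_mask = 1ULL << 0;	/* CPU 0 depends on broadcast */
	printf("cpu0 armed: %d\n", resume_oneshot(0));	/* prints 1 */
	printf("cpu1 armed: %d\n", resume_oneshot(1));	/* prints 0 */
	return 0;
}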
@@ -549,20 +560,17 @@ void tick_broadcast_switch_to_oneshot(void)
  */
 void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 {
-	struct clock_event_device *bc;
 	unsigned long flags;
 	unsigned int cpu = *cpup;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
 
-	bc = tick_broadcast_device.evtdev;
+	/*
+	 * Clear the broadcast mask flag for the dead cpu, but do not
+	 * stop the broadcast device!
+	 */
 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
 
-	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
-		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
-	}
-
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
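The teardown hunk is the mirror image: the dead CPU is dropped from the mask, and the early shutdown of the broadcast device is deliberately removed, since a CPU resuming later still needs it. Under the same bitmask model (shutdown_oneshot_cpu is a hypothetical name):

/* Same 64-bit mask model as the previous sketch. */
#include <stdint.h>
#include <stdio.h>

static uint64_t oneshot_mask = 0x3;	/* CPUs 0 and 1 armed */

static void shutdown_oneshot_cpu(int cpu)
{
	/*
	 * Only drop the dead CPU. The broadcast device itself stays
	 * running even if the mask goes empty; stopping it here was
	 * exactly what the removed lines used to do.
	 */
	oneshot_mask &= ~(1ULL << cpu);
}

int main(void)
{
	shutdown_oneshot_cpu(1);
	printf("oneshot_mask: %#llx\n", (unsigned long long)oneshot_mask);
	return 0;
}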
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b416995b97..8c3fef1db0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -160,6 +160,18 @@ void tick_nohz_stop_sched_tick(void)
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
+	/*
+	 * If this cpu is offline and it is the one which updates
+	 * jiffies, then give up the assignment and let it be taken by
+	 * the cpu which runs the tick timer next. If we don't drop
+	 * this here the jiffies might be stale and do_timer() never
+	 * invoked.
+	 */
+	if (unlikely(!cpu_online(cpu))) {
+		if (cpu == tick_do_timer_cpu)
+			tick_do_timer_cpu = -1;
+	}
+
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 		goto end;
 
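The tick-sched.c hunk hands the jiffies-update duty off when its owner goes offline: if the dying CPU is the one recorded in tick_do_timer_cpu, the assignment is dropped so the next CPU that runs the tick picks it up. A self-contained model of that handoff; tick_do_timer_cpu mirrors the kernel variable, the rest of the scaffolding is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static int tick_do_timer_cpu = 0;	/* CPU 0 currently updates jiffies */

static void stop_sched_tick(int cpu, bool online)
{
	/*
	 * An offline CPU holding the duty must relinquish it (-1 is the
	 * sentinel the patch uses); otherwise jiffies would go stale
	 * and do_timer() would never be invoked again.
	 */
	if (!online && cpu == tick_do_timer_cpu)
		tick_do_timer_cpu = -1;
}

int main(void)
{
	stop_sched_tick(0, false);	/* CPU 0 goes offline */
	printf("do_timer cpu: %d\n", tick_do_timer_cpu);	/* prints -1 */
	return 0;
}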
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index acc417b5a9..4ad79f6bde 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -217,6 +217,7 @@ static void change_clocksource(void)
 }
 #else
 static inline void change_clocksource(void) { }
+static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
@@ -280,6 +281,8 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
+/* xtime offset when we went into suspend */
+static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -305,6 +308,8 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
+	/* Make sure that we have the correct xtime reference */
+	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	/* re-base the last cycle value */
 	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
@@ -325,9 +330,12 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 {
 	unsigned long flags;
 
+	timekeeping_suspend_time = read_persistent_clock();
+
 	write_seqlock_irqsave(&xtime_lock, flags);
+	/* Get the current xtime offset */
+	timekeeping_suspend_nsecs = __get_nsec_offset();
 	timekeeping_suspended = 1;
-	timekeeping_suspend_time = read_persistent_clock();
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
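Taken together, the timekeeping.c hunks stash the nanoseconds that had accumulated since the last xtime update when suspend hits (__get_nsec_offset(), stubbed to return 0 in the #else branch shown above) and fold them back into xtime on resume, before the cycle counter is re-based; read_persistent_clock() is also hoisted ahead of the seqlock. A self-contained model of that bookkeeping, with illustrative types and helper names:

#include <stdint.h>
#include <stdio.h>

struct fake_timespec { long tv_sec; long tv_nsec; };

static struct fake_timespec xtime = { 1000, 0 };
static int64_t suspend_nsecs;	/* offset stashed at suspend */

/*
 * Fold nanoseconds into a timespec, normalizing tv_nsec, in the
 * spirit of the kernel's timespec_add_ns().
 */
static void add_ns(struct fake_timespec *ts, int64_t ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= 1000000000L) {
		ts->tv_nsec -= 1000000000L;
		ts->tv_sec++;
	}
}

static void suspend(int64_t nsec_offset_now)
{
	/* Mirrors timekeeping_suspend_nsecs = __get_nsec_offset(). */
	suspend_nsecs = nsec_offset_now;
}

static void resume(void)
{
	/*
	 * Without this, the time that had elapsed between the last
	 * xtime update and the suspend would be lost once the cycle
	 * counter is re-based.
	 */
	add_ns(&xtime, suspend_nsecs);
}

int main(void)
{
	suspend(750000000);	/* 0.75s were pending when suspend hit */
	resume();
	printf("xtime: %ld.%09ld\n", xtime.tv_sec, xtime.tv_nsec);
	return 0;
}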