Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c                24
-rw-r--r--  kernel/power/Kconfig            37
-rw-r--r--  kernel/rcutorture.c             14
-rw-r--r--  kernel/sched.c                 155
-rw-r--r--  kernel/time/clocksource.c        8
-rw-r--r--  kernel/time/tick-broadcast.c    36
-rw-r--r--  kernel/time/tick-common.c       32
-rw-r--r--  kernel/time/tick-internal.h      4
-rw-r--r--  kernel/timer.c                  16
9 files changed, 132 insertions, 194 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 476cb0c0b4a4..ec4cb9f3e3b7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -540,19 +540,19 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 /*
  * Switch to high resolution mode
  */
-static void hrtimer_switch_to_hres(void)
+static int hrtimer_switch_to_hres(void)
 {
         struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
         unsigned long flags;
 
         if (base->hres_active)
-                return;
+                return 1;
 
         local_irq_save(flags);
 
         if (tick_init_highres()) {
                 local_irq_restore(flags);
-                return;
+                return 0;
         }
         base->hres_active = 1;
         base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
@@ -565,13 +565,14 @@ static void hrtimer_switch_to_hres(void)
         local_irq_restore(flags);
         printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
                smp_processor_id());
+        return 1;
 }
 
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
+static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                             struct hrtimer_clock_base *base)
@@ -1130,6 +1131,9 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
                 if (base->softirq_time.tv64 <= timer->expires.tv64)
                         break;
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+                WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
+#endif
                 timer_stats_account_hrtimer(timer);
 
                 fn = timer->function;
@@ -1173,7 +1177,8 @@ void hrtimer_run_queues(void)
          * deadlock vs. xtime_lock.
          */
         if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
-                hrtimer_switch_to_hres();
+                if (hrtimer_switch_to_hres())
+                        return;
 
         hrtimer_get_softirq_time(cpu_base);
 
@@ -1355,17 +1360,16 @@ static void migrate_hrtimers(int cpu)
         tick_cancel_sched_timer(cpu);
 
         local_irq_disable();
-
-        spin_lock(&new_base->lock);
-        spin_lock(&old_base->lock);
+        double_spin_lock(&new_base->lock, &old_base->lock,
+                         smp_processor_id() < cpu);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                 migrate_hrtimer_list(&old_base->clock_base[i],
                                      &new_base->clock_base[i]);
         }
-        spin_unlock(&old_base->lock);
-        spin_unlock(&new_base->lock);
 
+        double_spin_unlock(&new_base->lock, &old_base->lock,
+                           smp_processor_id() < cpu);
         local_irq_enable();
         put_cpu_var(hrtimer_bases);
 }
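
Both migrate_hrtimers() above and migrate_timers() in kernel/timer.c below replace two nested spin_lock() calls with double_spin_lock()/double_spin_unlock(). The helper encodes a fixed lock order chosen by its boolean argument; here `smp_processor_id() < cpu` makes the lock of the lower-numbered CPU's base the one taken first on either side of the migration. A minimal sketch of the intended semantics, assuming the helper as provided by include/linux/spinlock.h in this kernel series (illustrative only, not copied from this patch):

#include <linux/types.h>
#include <linux/spinlock.h>

/* Take two spinlocks in a caller-chosen, globally consistent order. */
static inline void
double_spin_lock(spinlock_t *l1, spinlock_t *l2, bool l1_first)
{
        if (l1_first) {
                spin_lock(l1);
                spin_lock(l2);
        } else {
                spin_lock(l2);
                spin_lock(l1);
        }
}

/* Release in the reverse order of acquisition. */
static inline void
double_spin_unlock(spinlock_t *l1, spinlock_t *l2, bool l1_taken_first)
{
        if (l1_taken_first) {
                spin_unlock(l2);
                spin_unlock(l1);
        } else {
                spin_unlock(l1);
                spin_unlock(l2);
        }
}

Because every CPU pair agrees on the same ordering bit, the hotplug migration paths cannot deadlock against a concurrent migration running on the other CPU.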
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 95f6657fff73..51a4dd0f1b74 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -81,29 +81,34 @@ config SOFTWARE_SUSPEND
81 bool "Software Suspend" 81 bool "Software Suspend"
82 depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) 82 depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
83 ---help--- 83 ---help---
84 Enable the possibility of suspending the machine. 84 Enable the suspend to disk (STD) functionality.
85 It doesn't need ACPI or APM.
86 You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
87 (patch for sysvinit needed).
88 85
89 It creates an image which is saved in your active swap. Upon next 86 You can suspend your machine with 'echo disk > /sys/power/state'.
87 Alternatively, you can use the additional userland tools available
88 from <http://suspend.sf.net>.
89
90 In principle it does not require ACPI or APM, although for example
91 ACPI will be used if available.
92
93 It creates an image which is saved in your active swap. Upon the next
90 boot, pass the 'resume=/dev/swappartition' argument to the kernel to 94 boot, pass the 'resume=/dev/swappartition' argument to the kernel to
91 have it detect the saved image, restore memory state from it, and 95 have it detect the saved image, restore memory state from it, and
92 continue to run as before. If you do not want the previous state to 96 continue to run as before. If you do not want the previous state to
93 be reloaded, then use the 'noresume' kernel argument. However, note 97 be reloaded, then use the 'noresume' kernel command line argument.
94 that your partitions will be fsck'd and you must re-mkswap your swap 98 Note, however, that fsck will be run on your filesystems and you will
95 partitions. It does not work with swap files. 99 need to run mkswap against the swap partition used for the suspend.
96 100
97 Right now you may boot without resuming and then later resume but 101 It also works with swap files to a limited extent (for details see
98 in meantime you cannot use those swap partitions/files which were 102 <file:Documentation/power/swsusp-and-swap-files.txt>).
99 involved in suspending. Also in this case there is a risk that buffers
100 on disk won't match with saved ones.
101 103
102 For more information take a look at <file:Documentation/power/swsusp.txt>. 104 Right now you may boot without resuming and resume later but in the
105 meantime you cannot use the swap partition(s)/file(s) involved in
106 suspending. Also in this case you must not use the filesystems
107 that were mounted before the suspend. In particular, you MUST NOT
108 MOUNT any journaled filesystems mounted before the suspend or they
109 will get corrupted in a nasty way.
103 110
104 (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386. 111 For more information take a look at <file:Documentation/power/swsusp.txt>.
105 we need identity mapping for resume to work, and that is trivial
106 to get with 4MB pages, but less than trivial on PAE).
107 112
108config PM_STD_PARTITION 113config PM_STD_PARTITION
109 string "Default resume partition" 114 string "Default resume partition"
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 482b11ff65cb..bcd14e83ef39 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -60,19 +60,19 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 5;        /* Interval between shuffles (in sec)*/
 static char *torture_type = "rcu";      /* What RCU implementation to torture. */
 
-module_param(nreaders, int, 0);
+module_param(nreaders, int, 0444);
 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0);
+module_param(nfakewriters, int, 0444);
 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0);
+module_param(stat_interval, int, 0444);
 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0);
+module_param(verbose, bool, 0444);
 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0);
+module_param(test_no_idle_hz, bool, 0444);
 MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0);
+module_param(shuffle_interval, int, 0444);
 MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(torture_type, charp, 0);
+module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
 
 #define TORTURE_FLAG "-torture:"
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f102e6c7a4c..a4ca632c477c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 }
 #endif
 
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
-        if (!rq->nr_running)
-                return;
-
-        spin_lock(&rq->lock);
-        /*
-         * If an SMT sibling task has been put to sleep for priority
-         * reasons reschedule the idle task to see if it can now run.
-         */
-        if (rq->nr_running)
-                resched_task(rq->idle);
-        spin_unlock(&rq->lock);
-#endif
-}
-
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
@@ -3239,10 +3222,7 @@ void scheduler_tick(void)
 
         update_cpu_clock(p, rq, now);
 
-        if (p == rq->idle)
-                /* Task on the idle queue */
-                wake_priority_sleeper(rq);
-        else
+        if (p != rq->idle)
                 task_running_tick(rq, p);
 #ifdef CONFIG_SMP
         update_load(rq);
@@ -3251,136 +3231,6 @@ void scheduler_tick(void)
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
-        /* If an SMT runqueue is sleeping due to priority reasons wake it up */
-        if (rq->curr == rq->idle && rq->nr_running)
-                resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
-        struct sched_domain *tmp, *sd = NULL;
-        int i;
-
-        for_each_domain(this_cpu, tmp) {
-                if (tmp->flags & SD_SHARE_CPUPOWER) {
-                        sd = tmp;
-                        break;
-                }
-        }
-
-        if (!sd)
-                return;
-
-        for_each_cpu_mask(i, sd->span) {
-                struct rq *smt_rq = cpu_rq(i);
-
-                if (i == this_cpu)
-                        continue;
-                if (unlikely(!spin_trylock(&smt_rq->lock)))
-                        continue;
-
-                wakeup_busy_runqueue(smt_rq);
-                spin_unlock(&smt_rq->lock);
-        }
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
-        return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-        struct sched_domain *tmp, *sd = NULL;
-        int ret = 0, i;
-
-        /* kernel/rt threads do not participate in dependent sleeping */
-        if (!p->mm || rt_task(p))
-                return 0;
-
-        for_each_domain(this_cpu, tmp) {
-                if (tmp->flags & SD_SHARE_CPUPOWER) {
-                        sd = tmp;
-                        break;
-                }
-        }
-
-        if (!sd)
-                return 0;
-
-        for_each_cpu_mask(i, sd->span) {
-                struct task_struct *smt_curr;
-                struct rq *smt_rq;
-
-                if (i == this_cpu)
-                        continue;
-
-                smt_rq = cpu_rq(i);
-                if (unlikely(!spin_trylock(&smt_rq->lock)))
-                        continue;
-
-                smt_curr = smt_rq->curr;
-
-                if (!smt_curr->mm)
-                        goto unlock;
-
-                /*
-                 * If a user task with lower static priority than the
-                 * running task on the SMT sibling is trying to schedule,
-                 * delay it till there is proportionately less timeslice
-                 * left of the sibling task to prevent a lower priority
-                 * task from using an unfair proportion of the
-                 * physical cpu's resources. -ck
-                 */
-                if (rt_task(smt_curr)) {
-                        /*
-                         * With real time tasks we run non-rt tasks only
-                         * per_cpu_gain% of the time.
-                         */
-                        if ((jiffies % DEF_TIMESLICE) >
-                                (sd->per_cpu_gain * DEF_TIMESLICE / 100))
-                                        ret = 1;
-                } else {
-                        if (smt_curr->static_prio < p->static_prio &&
-                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
-                                smt_slice(smt_curr, sd) > task_timeslice(p))
-                                        ret = 1;
-                }
-unlock:
-                spin_unlock(&smt_rq->lock);
-        }
-        return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-        return 0;
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
 void fastcall add_preempt_count(int val)
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible:
                 if (!rq->nr_running) {
                         next = rq->idle;
                         rq->expired_timestamp = 0;
-                        wake_sleeping_dependent(cpu);
                         goto switch_tasks;
                 }
         }
@@ -3547,8 +3396,6 @@ need_resched_nonpreemptible:
                 }
         }
         next->sleep_type = SLEEP_NORMAL;
-        if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
-                next = rq->idle;
 switch_tasks:
         if (next == rq->idle)
                 schedstat_inc(rq, sched_goidle);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 193a0793af95..5b0e46b56fd0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock);
 static char override_name[32];
 static int finished_booting;
 
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
  *
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
  */
 static int __init clocksource_done_booting(void)
 {
         finished_booting = 1;
         return 0;
 }
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 12b3efeb9f6f..5567745470f7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -284,6 +284,42 @@ void tick_shutdown_broadcast(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+void tick_suspend_broadcast(void)
+{
+        struct clock_event_device *bc;
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+        bc = tick_broadcast_device.evtdev;
+        if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+
+        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+int tick_resume_broadcast(void)
+{
+        struct clock_event_device *bc;
+        unsigned long flags;
+        int broadcast = 0;
+
+        spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+        bc = tick_broadcast_device.evtdev;
+        if (bc) {
+                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
+                    !cpus_empty(tick_broadcast_mask))
+                        tick_broadcast_start_periodic(bc);
+
+                broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask);
+        }
+        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+
+        return broadcast;
+}
+
+
 #ifdef CONFIG_TICK_ONESHOT
 
 static cpumask_t tick_broadcast_oneshot_mask;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 0986a2bfab49..43ba1bdec14c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -298,6 +298,28 @@ static void tick_shutdown(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
+static void tick_suspend_periodic(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_device_lock, flags);
+        if (td->mode == TICKDEV_MODE_PERIODIC)
+                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
+static void tick_resume_periodic(void)
+{
+        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+        unsigned long flags;
+
+        spin_lock_irqsave(&tick_device_lock, flags);
+        if (td->mode == TICKDEV_MODE_PERIODIC)
+                tick_setup_periodic(td->evtdev, 0);
+        spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
 /*
  * Notification about clock event devices
  */
@@ -325,6 +347,16 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                 tick_shutdown(dev);
                 break;
 
+        case CLOCK_EVT_NOTIFY_SUSPEND:
+                tick_suspend_periodic();
+                tick_suspend_broadcast();
+                break;
+
+        case CLOCK_EVT_NOTIFY_RESUME:
+                if (!tick_resume_broadcast())
+                        tick_resume_periodic();
+                break;
+
         default:
                 break;
         }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 54861a0f29ff..75890efd24ff 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -67,6 +67,8 @@ extern int tick_check_broadcast_device(struct clock_event_device *dev);
 extern int tick_is_broadcast_device(struct clock_event_device *dev);
 extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
 extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_suspend_broadcast(void);
+extern int tick_resume_broadcast(void);
 
 extern void
 tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
@@ -90,6 +92,8 @@ static inline int tick_device_uses_broadcast(struct clock_event_device *dev,
 static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
 static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
 static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_suspend_broadcast(void) { }
+static inline int tick_resume_broadcast(void) { return 0; }
 
 /*
  * Set the periodic handler in non broadcast mode
diff --git a/kernel/timer.c b/kernel/timer.c
index 6663a87f7304..797cccb86431 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -862,6 +862,8 @@ int do_settimeofday(struct timespec *tv)
         clock->error = 0;
         ntp_clear();
 
+        update_vsyscall(&xtime, clock);
+
         write_sequnlock_irqrestore(&xtime_lock, flags);
 
         /* signal hrtimers about time change */
@@ -997,6 +999,9 @@ static int timekeeping_resume(struct sys_device *dev)
         write_sequnlock_irqrestore(&xtime_lock, flags);
 
         touch_softlockup_watchdog();
+
+        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
+
         /* Resume hrtimers */
         clock_was_set();
 
@@ -1011,6 +1016,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
         timekeeping_suspended = 1;
         timekeeping_suspend_time = read_persistent_clock();
         write_sequnlock_irqrestore(&xtime_lock, flags);
+
+        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+
         return 0;
 }
 
@@ -1651,8 +1659,8 @@ static void __devinit migrate_timers(int cpu)
         new_base = get_cpu_var(tvec_bases);
 
         local_irq_disable();
-        spin_lock(&new_base->lock);
-        spin_lock(&old_base->lock);
+        double_spin_lock(&new_base->lock, &old_base->lock,
+                         smp_processor_id() < cpu);
 
         BUG_ON(old_base->running_timer);
 
@@ -1665,8 +1673,8 @@ static void __devinit migrate_timers(int cpu)
                 migrate_timer_list(new_base, old_base->tv5.vec + i);
         }
 
-        spin_unlock(&old_base->lock);
-        spin_unlock(&new_base->lock);
+        double_spin_unlock(&new_base->lock, &old_base->lock,
+                           smp_processor_id() < cpu);
         local_irq_enable();
         put_cpu_var(tvec_bases);
 }
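
Taken together, the timer.c hunks above and the new tick-common.c notifier cases give the tick machinery a single suspend/resume entry point: timekeeping_suspend() shuts down the per-CPU periodic tick devices and the broadcast device through the clockevents notifier, and on resume the broadcast device is brought back first, with the per-CPU periodic tick restarted only on CPUs the broadcast device does not already serve. A condensed, illustrative sketch of that flow (not a literal copy of the kernel functions):

/* Suspend side: stop per-CPU periodic ticks, then the broadcast device. */
static void tick_suspend_flow_sketch(void)
{
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        /* -> tick_notify(): tick_suspend_periodic(); tick_suspend_broadcast(); */
}

/*
 * Resume side: tick_resume_broadcast() restarts the broadcast device and
 * reports whether this CPU is in tick_broadcast_mask; only CPUs outside
 * that mask restart their own periodic tick.
 */
static void tick_resume_flow_sketch(void)
{
        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
        /* -> tick_notify(): if (!tick_resume_broadcast()) tick_resume_periodic(); */
}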