Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c              24
-rw-r--r--  kernel/fork.c                  2
-rw-r--r--  kernel/futex.c                 2
-rw-r--r--  kernel/hrtimer.c              11
-rw-r--r--  kernel/lockdep.c               8
-rw-r--r--  kernel/power/console.c        10
-rw-r--r--  kernel/power/disk.c            7
-rw-r--r--  kernel/power/user.c            9
-rw-r--r--  kernel/time/clockevents.c     69
-rw-r--r--  kernel/time/clocksource.c      3
-rw-r--r--  kernel/time/ntp.c             30
-rw-r--r--  kernel/time/tick-broadcast.c  27
-rw-r--r--  kernel/time/tick-common.c     13
-rw-r--r--  kernel/time/tick-internal.h   11
-rw-r--r--  kernel/time/tick-oneshot.c    12
-rw-r--r--  kernel/time/timer_list.c       6
-rw-r--r--  kernel/timer.c                19
17 files changed, 134 insertions, 129 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 359955800d..628c7ac590 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -739,28 +739,26 @@ static inline void audit_free_context(struct audit_context *context)
 void audit_log_task_context(struct audit_buffer *ab)
 {
 	char *ctx = NULL;
-	ssize_t len = 0;
+	unsigned len;
+	int error;
+	u32 sid;
+
+	selinux_get_task_sid(current, &sid);
+	if (!sid)
+		return;
 
-	len = security_getprocattr(current, "current", NULL, 0);
-	if (len < 0) {
-		if (len != -EINVAL)
+	error = selinux_sid_to_string(sid, &ctx, &len);
+	if (error) {
+		if (error != -EINVAL)
 			goto error_path;
 		return;
 	}
 
-	ctx = kmalloc(len, GFP_KERNEL);
-	if (!ctx)
-		goto error_path;
-
-	len = security_getprocattr(current, "current", ctx, len);
-	if (len < 0 )
-		goto error_path;
-
 	audit_log_format(ab, " subj=%s", ctx);
+	kfree(ctx);
 	return;
 
 error_path:
-	kfree(ctx);
 	audit_panic("error in audit_log_task_context");
 	return;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index d154cc7864..6af959c034 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -933,8 +933,8 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
 
 static inline void rt_mutex_init_task(struct task_struct *p)
 {
-#ifdef CONFIG_RT_MUTEXES
 	spin_lock_init(&p->pi_lock);
+#ifdef CONFIG_RT_MUTEXES
 	plist_head_init(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e749e7df14..5a270b5e3f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -565,6 +565,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
+	spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
@@ -604,6 +605,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	pi_state->owner = new_owner;
 	spin_unlock_irq(&new_owner->pi_lock);
 
+	spin_unlock(&pi_state->pi_mutex.wait_lock);
 	rt_mutex_unlock(&pi_state->pi_mutex);
 
 	return 0;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ec4cb9f3e3..6a7938a0d5 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
 	ktime_t xtim, tomono;
-	struct timespec xts;
+	struct timespec xts, tom;
 	unsigned long seq;
 
 	do {
@@ -145,10 +145,11 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 #else
 		xts = xtime;
 #endif
+		tom = wall_to_monotonic;
 	} while (read_seqretry(&xtime_lock, seq));
 
 	xtim = timespec_to_ktime(xts);
-	tomono = timespec_to_ktime(wall_to_monotonic);
+	tomono = timespec_to_ktime(tom);
 	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
 	base->clock_base[CLOCK_MONOTONIC].softirq_time =
 		ktime_add(xtim, tomono);
@@ -644,6 +645,12 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		orun++;
 	}
 	timer->expires = ktime_add(timer->expires, interval);
+	/*
+	 * Make sure, that the result did not wrap with a very large
+	 * interval.
+	 */
+	if (timer->expires.tv64 < 0)
+		timer->expires = ktime_set(KTIME_SEC_MAX, 0);
 
 	return orun;
 }
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8dc24c92dc..7065a687ac 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2742,6 +2742,10 @@ void debug_show_all_locks(void)
 	int count = 10;
 	int unlock = 1;
 
+	if (unlikely(!debug_locks)) {
+		printk("INFO: lockdep is turned off.\n");
+		return;
+	}
 	printk("\nShowing all locks held in the system:\n");
 
 	/*
@@ -2785,6 +2789,10 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 void debug_show_held_locks(struct task_struct *task)
 {
+	if (unlikely(!debug_locks)) {
+		printk("INFO: lockdep is turned off.\n");
+		return;
+	}
 	lockdep_print_held_locks(task);
 }
 
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 623786d441..89bcf4973e 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -27,7 +27,15 @@ int pm_prepare_console(void)
 		return 1;
 	}
 
-	set_console(SUSPEND_CONSOLE);
+	if (set_console(SUSPEND_CONSOLE)) {
+		/*
+		 * We're unable to switch to the SUSPEND_CONSOLE.
+		 * Let the calling function know so it can decide
+		 * what to do.
+		 */
+		release_console_sem();
+		return 1;
+	}
 	release_console_sem();
 
 	if (vt_waitactive(SUSPEND_CONSOLE)) {
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 406b20adb2..aec19b063e 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -240,12 +240,6 @@ static int software_resume(void)
 		goto Done;
 	}
 
-	error = platform_prepare();
-	if (error) {
-		swsusp_free();
-		goto Thaw;
-	}
-
 	pr_debug("PM: Reading swsusp image.\n");
 
 	error = swsusp_read();
@@ -268,7 +262,6 @@ static int software_resume(void)
 	enable_nonboot_cpus();
  Free:
 	swsusp_free();
-	platform_finish();
 	device_resume();
 	resume_console();
  Thaw:
diff --git a/kernel/power/user.c b/kernel/power/user.c
index dd09efe7df..7cf6713b23 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -368,9 +368,12 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 		if (error) {
 			printk(KERN_ERR "Failed to suspend some devices.\n");
 		} else {
-			/* Enter S3, system is already frozen */
-			suspend_enter(PM_SUSPEND_MEM);
-
+			error = disable_nonboot_cpus();
+			if (!error) {
+				/* Enter S3, system is already frozen */
+				suspend_enter(PM_SUSPEND_MEM);
+				enable_nonboot_cpus();
+			}
 			/* Wake up devices */
 			device_resume();
 		}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 67932ea78c..76212b2a99 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -274,72 +274,3 @@ void clockevents_notify(unsigned long reason, void *arg)
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 
-#ifdef CONFIG_SYSFS
-
-/**
- * clockevents_show_registered - sysfs interface for listing clockevents
- * @dev: unused
- * @buf: char buffer to be filled with clock events list
- *
- * Provides sysfs interface for listing registered clock event devices
- */
-static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
-{
-	struct list_head *tmp;
-	char *p = buf;
-	int cpu;
-
-	spin_lock(&clockevents_lock);
-
-	list_for_each(tmp, &clockevent_devices) {
-		struct clock_event_device *ce;
-
-		ce = list_entry(tmp, struct clock_event_device, list);
-		p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
-			     ce->features, ce->mode);
-		p += sprintf(p, " C:");
-		if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
-			for_each_cpu_mask(cpu, ce->cpumask)
-				p += sprintf(p, " %d", cpu);
-		} else {
-			/*
-			 * FIXME: Add the cpu which is handling this sucker
-			 */
-		}
-		p += sprintf(p, "\n");
-	}
-
-	spin_unlock(&clockevents_lock);
-
-	return p - buf;
-}
-
-/*
- * Sysfs setup bits:
- */
-static SYSDEV_ATTR(registered, 0600,
-		   clockevents_show_registered, NULL);
-
-static struct sysdev_class clockevents_sysclass = {
-	set_kset_name("clockevents"),
-};
-
-static struct sys_device clockevents_sys_device = {
-	.id	= 0,
-	.cls	= &clockevents_sysclass,
-};
-
-static int __init clockevents_sysfs_init(void)
-{
-	int error = sysdev_class_register(&clockevents_sysclass);
-
-	if (!error)
-		error = sysdev_register(&clockevents_sys_device);
-	if (!error)
-		error = sysdev_create_file(
-				&clockevents_sys_device,
-				&attr_registered);
-	return error;
-}
-device_initcall(clockevents_sysfs_init);
-#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 5b0e46b56f..fe5c7db242 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -151,7 +151,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer(&watchdog_timer);
 		}
-	} else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
+	} else {
+		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 
 		if (!watchdog || cs->rating > watchdog->rating) {
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index eb12509e00..cb25649c6f 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -32,7 +32,7 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;	/* clock synchronization status */
 int time_status = STA_UNSYNC;		/* clock status bits */
-static long time_offset;		/* time adjustment (ns) */
+static s64 time_offset;			/* time adjustment (ns) */
 static long time_constant = 2;		/* pll time constant */
 long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us) */
 long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us) */
@@ -196,7 +196,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void)
  */
 int do_adjtimex(struct timex *txc)
 {
-	long ltemp, mtemp, save_adjust;
+	long mtemp, save_adjust, rem;
 	s64 freq_adj, temp64;
 	int result;
 
@@ -277,14 +277,14 @@ int do_adjtimex(struct timex *txc)
 		    time_adjust = txc->offset;
 		}
 		else if (time_status & STA_PLL) {
-		    ltemp = txc->offset * NSEC_PER_USEC;
+		    time_offset = txc->offset * NSEC_PER_USEC;
 
 		    /*
 		     * Scale the phase adjustment and
 		     * clamp to the operating range.
 		     */
-		    time_offset = min(ltemp, MAXPHASE * NSEC_PER_USEC);
-		    time_offset = max(time_offset, -MAXPHASE * NSEC_PER_USEC);
+		    time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
+		    time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
 
 		    /*
 		     * Select whether the frequency is to be controlled
@@ -297,11 +297,11 @@ int do_adjtimex(struct timex *txc)
 		    mtemp = xtime.tv_sec - time_reftime;
 		    time_reftime = xtime.tv_sec;
 
-		    freq_adj = (s64)time_offset * mtemp;
+		    freq_adj = time_offset * mtemp;
 		    freq_adj = shift_right(freq_adj, time_constant * 2 +
 					   (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
 		    if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-			temp64 = (s64)time_offset << (SHIFT_NSEC - SHIFT_FLL);
+			temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
 			if (time_offset < 0) {
 			    temp64 = -temp64;
 			    do_div(temp64, mtemp);
@@ -314,8 +314,10 @@ int do_adjtimex(struct timex *txc)
 		    freq_adj += time_freq;
 		    freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
 		    time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-		    time_offset = (time_offset / NTP_INTERVAL_FREQ)
-				<< SHIFT_UPDATE;
+		    time_offset = div_long_long_rem_signed(time_offset,
+							   NTP_INTERVAL_FREQ,
+							   &rem);
+		    time_offset <<= SHIFT_UPDATE;
 		} /* STA_PLL */
 	    } /* txc->modes & ADJ_OFFSET */
 	    if (txc->modes & ADJ_TICK)
@@ -328,12 +330,12 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
 		result = TIME_ERROR;
 
 	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
 	    txc->offset = save_adjust;
 	else
-	    txc->offset = shift_right(time_offset, SHIFT_UPDATE)
-			* NTP_INTERVAL_FREQ / 1000;
-	txc->freq = (time_freq / NSEC_PER_USEC)
-			<< (SHIFT_USEC - SHIFT_NSEC);
+	    txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
+			NTP_INTERVAL_FREQ / 1000;
+	txc->freq = (time_freq / NSEC_PER_USEC) <<
+			(SHIFT_USEC - SHIFT_NSEC);
 	txc->maxerror = time_maxerror;
 	txc->esterror = time_esterror;
 	txc->status = time_status;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5567745470..eadfce2fff 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -307,12 +307,19 @@ int tick_resume_broadcast(void)
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	bc = tick_broadcast_device.evtdev;
-	if (bc) {
-		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
-		    !cpus_empty(tick_broadcast_mask))
-			tick_broadcast_start_periodic(bc);
 
-		broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask);
+	if (bc) {
+		switch (tick_broadcast_device.mode) {
+		case TICKDEV_MODE_PERIODIC:
+			if(!cpus_empty(tick_broadcast_mask))
+				tick_broadcast_start_periodic(bc);
+			broadcast = cpu_isset(smp_processor_id(),
+					      tick_broadcast_mask);
+			break;
+		case TICKDEV_MODE_ONESHOT:
+			broadcast = tick_resume_broadcast_oneshot(bc);
+			break;
+		}
 	}
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 
@@ -347,6 +354,16 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 	}
 }
 
+int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+	if(!cpus_empty(tick_broadcast_oneshot_mask))
+		tick_broadcast_set_event(ktime_get(), 1);
+
+	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+}
+
 /*
  * Reprogram the broadcast device:
  *
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 43ba1bdec1..bfda3f7f07 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -298,18 +298,17 @@ static void tick_shutdown(unsigned int *cpup)
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_suspend_periodic(void)
+static void tick_suspend(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 	unsigned long flags;
 
 	spin_lock_irqsave(&tick_device_lock, flags);
-	if (td->mode == TICKDEV_MODE_PERIODIC)
-		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_resume_periodic(void)
+static void tick_resume(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 	unsigned long flags;
@@ -317,6 +316,8 @@ static void tick_resume_periodic(void)
 	spin_lock_irqsave(&tick_device_lock, flags);
 	if (td->mode == TICKDEV_MODE_PERIODIC)
 		tick_setup_periodic(td->evtdev, 0);
+	else
+		tick_resume_oneshot();
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -348,13 +349,13 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
 		break;
 
 	case CLOCK_EVT_NOTIFY_SUSPEND:
-		tick_suspend_periodic();
+		tick_suspend();
 		tick_suspend_broadcast();
 		break;
 
 	case CLOCK_EVT_NOTIFY_RESUME:
 		if (!tick_resume_broadcast())
-			tick_resume_periodic();
+			tick_resume();
 		break;
 
 	default:
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 75890efd24..c9d203bde5 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -19,12 +19,13 @@ extern void tick_setup_oneshot(struct clock_event_device *newdev,
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_oneshot_notify(void);
 extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
-
+extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
+extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -43,6 +44,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 {
 	BUG();
 }
+static inline void tick_resume_oneshot(void)
+{
+	BUG();
+}
 static inline int tick_program_event(ktime_t expires, int force)
 {
 	return 0;
@@ -54,6 +59,10 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 }
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
+{
+	return 0;
+}
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 2e8b7ff863..f6997ab0c3 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -41,6 +41,18 @@ int tick_program_event(ktime_t expires, int force)
 }
 
 /**
+ * tick_resume_onshot - resume oneshot mode
+ */
+void tick_resume_oneshot(void)
+{
+	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct clock_event_device *dev = td->evtdev;
+
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+	tick_program_event(ktime_get(), 1);
+}
+
+/**
  * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
  */
 void tick_setup_oneshot(struct clock_event_device *newdev,
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f82c635c3d..59df5e8555 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -194,9 +194,9 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
 		return;
 	}
 	SEQ_printf(m, "%s\n", dev->name);
-	SEQ_printf(m, " max_delta_ns: %ld\n", dev->max_delta_ns);
-	SEQ_printf(m, " min_delta_ns: %ld\n", dev->min_delta_ns);
-	SEQ_printf(m, " mult: %ld\n", dev->mult);
+	SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns);
+	SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns);
+	SEQ_printf(m, " mult: %lu\n", dev->mult);
 	SEQ_printf(m, " shift: %d\n", dev->shift);
 	SEQ_printf(m, " mode: %d\n", dev->mode);
 	SEQ_printf(m, " next_event: %Ld nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 797cccb864..440048acae 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -695,15 +695,28 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
 {
 	ktime_t hr_delta = hrtimer_get_next_event();
 	struct timespec tsdelta;
+	unsigned long delta;
 
 	if (hr_delta.tv64 == KTIME_MAX)
 		return expires;
 
-	if (hr_delta.tv64 <= TICK_NSEC)
-		return now;
+	/*
+	 * Expired timer available, let it expire in the next tick
+	 */
+	if (hr_delta.tv64 <= 0)
+		return now + 1;
 
 	tsdelta = ktime_to_timespec(hr_delta);
-	now += timespec_to_jiffies(&tsdelta);
+	delta = timespec_to_jiffies(&tsdelta);
+	/*
+	 * Take rounding errors in to account and make sure, that it
+	 * expires in the next tick. Otherwise we go into an endless
+	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
+	 * the timer softirq
+	 */
+	if (delta < 1)
+		delta = 1;
+	now += delta;
 	if (time_before(now, expires))
 		return now;
 	return expires;