about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-12 22:56:15 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-12 22:56:15 -0500
commit9465d9cc31fa732089cd8bec9f1bdfcdc174a5ce (patch)
treefb31a0a6271b255ffe6e29b4f9eb4192253f8c7f /kernel
parente71c3978d6f97659f6c3ee942c3e581299e4adf2 (diff)
parentc029a2bec66e42e57538cb65e28618baf6a4b311 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner: "The time/timekeeping/timer folks deliver with this update: - Fix a reintroduced signed/unsigned issue and cleanup the whole signed/unsigned mess in the timekeeping core so this won't happen accidentally again. - Add a new trace clock based on boot time - Prevent injection of random sleep times when PM tracing abuses the RTC for storage - Make posix timers configurable for real tiny systems - Add tracepoints for the alarm timer subsystem so timer based suspend wakeups can be instrumented - The usual pile of fixes and updates to core and drivers" * 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits) timekeeping: Use mul_u64_u32_shr() instead of open coding it timekeeping: Get rid of pointless typecasts timekeeping: Make the conversion call chain consistently unsigned timekeeping: Force unsigned clocksource to nanoseconds conversion alarmtimer: Add tracepoints for alarm timers trace: Update documentation for mono, mono_raw and boot clock trace: Add an option for boot clock as trace clock timekeeping: Add a fast and NMI safe boot clock timekeeping/clocksource_cyc2ns: Document intended range limitation timekeeping: Ignore the bogus sleep time if pm_trace is enabled selftests/timers: Fix spelling mistake "Asyncrhonous" -> "Asynchronous" clocksource/drivers/bcm2835_timer: Unmap region obtained by of_iomap clocksource/drivers/arm_arch_timer: Map frame with of_io_request_and_map() arm64: dts: rockchip: Arch counter doesn't tick in system suspend clocksource/drivers/arm_arch_timer: Don't assume clock runs in suspend posix-timers: Make them configurable posix_cpu_timers: Move the add_device_randomness() call to a proper place timer: Move sys_alarm from timer.c to itimer.c ptp_clock: Allow for it to be optional Kconfig: Regenerate *.c_shipped files after previous changes ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/exit.c15
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/time/Makefile10
-rw-r--r--kernel/time/alarmtimer.c59
-rw-r--r--kernel/time/hrtimer.c20
-rw-r--r--kernel/time/itimer.c15
-rw-r--r--kernel/time/posix-cpu-timers.c4
-rw-r--r--kernel/time/posix-stubs.c123
-rw-r--r--kernel/time/timekeeping.c90
-rw-r--r--kernel/time/timer.c48
-rw-r--r--kernel/trace/trace.c1
14 files changed, 307 insertions, 97 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 333d364be29d..b3a047f208a7 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -307,12 +307,17 @@ static inline long put_compat_itimerval(struct compat_itimerval __user *o,
307 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); 307 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
308} 308}
309 309
310asmlinkage long sys_ni_posix_timers(void);
311
310COMPAT_SYSCALL_DEFINE2(getitimer, int, which, 312COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
311 struct compat_itimerval __user *, it) 313 struct compat_itimerval __user *, it)
312{ 314{
313 struct itimerval kit; 315 struct itimerval kit;
314 int error; 316 int error;
315 317
318 if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
319 return sys_ni_posix_timers();
320
316 error = do_getitimer(which, &kit); 321 error = do_getitimer(which, &kit);
317 if (!error && put_compat_itimerval(it, &kit)) 322 if (!error && put_compat_itimerval(it, &kit))
318 error = -EFAULT; 323 error = -EFAULT;
@@ -326,6 +331,9 @@ COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
326 struct itimerval kin, kout; 331 struct itimerval kin, kout;
327 int error; 332 int error;
328 333
334 if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
335 return sys_ni_posix_timers();
336
329 if (in) { 337 if (in) {
330 if (get_compat_itimerval(&kin, in)) 338 if (get_compat_itimerval(&kin, in))
331 return -EFAULT; 339 return -EFAULT;
diff --git a/kernel/exit.c b/kernel/exit.c
index 3076f3089919..aacff8e2aec0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,7 @@
54#include <linux/writeback.h> 54#include <linux/writeback.h>
55#include <linux/shm.h> 55#include <linux/shm.h>
56#include <linux/kcov.h> 56#include <linux/kcov.h>
57#include <linux/random.h>
57 58
58#include <asm/uaccess.h> 59#include <asm/uaccess.h>
59#include <asm/unistd.h> 60#include <asm/unistd.h>
@@ -91,11 +92,10 @@ static void __exit_signal(struct task_struct *tsk)
91 lockdep_tasklist_lock_is_held()); 92 lockdep_tasklist_lock_is_held());
92 spin_lock(&sighand->siglock); 93 spin_lock(&sighand->siglock);
93 94
95#ifdef CONFIG_POSIX_TIMERS
94 posix_cpu_timers_exit(tsk); 96 posix_cpu_timers_exit(tsk);
95 if (group_dead) { 97 if (group_dead) {
96 posix_cpu_timers_exit_group(tsk); 98 posix_cpu_timers_exit_group(tsk);
97 tty = sig->tty;
98 sig->tty = NULL;
99 } else { 99 } else {
100 /* 100 /*
101 * This can only happen if the caller is de_thread(). 101 * This can only happen if the caller is de_thread().
@@ -104,7 +104,13 @@ static void __exit_signal(struct task_struct *tsk)
104 */ 104 */
105 if (unlikely(has_group_leader_pid(tsk))) 105 if (unlikely(has_group_leader_pid(tsk)))
106 posix_cpu_timers_exit_group(tsk); 106 posix_cpu_timers_exit_group(tsk);
107 }
108#endif
107 109
110 if (group_dead) {
111 tty = sig->tty;
112 sig->tty = NULL;
113 } else {
108 /* 114 /*
109 * If there is any task waiting for the group exit 115 * If there is any task waiting for the group exit
110 * then notify it: 116 * then notify it:
@@ -116,6 +122,9 @@ static void __exit_signal(struct task_struct *tsk)
116 sig->curr_target = next_thread(tsk); 122 sig->curr_target = next_thread(tsk);
117 } 123 }
118 124
125 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
126 sizeof(unsigned long long));
127
119 /* 128 /*
120 * Accumulate here the counters for all threads as they die. We could 129 * Accumulate here the counters for all threads as they die. We could
121 * skip the group leader because it is the last user of signal_struct, 130 * skip the group leader because it is the last user of signal_struct,
@@ -799,8 +808,10 @@ void __noreturn do_exit(long code)
799 acct_update_integrals(tsk); 808 acct_update_integrals(tsk);
800 group_dead = atomic_dec_and_test(&tsk->signal->live); 809 group_dead = atomic_dec_and_test(&tsk->signal->live);
801 if (group_dead) { 810 if (group_dead) {
811#ifdef CONFIG_POSIX_TIMERS
802 hrtimer_cancel(&tsk->signal->real_timer); 812 hrtimer_cancel(&tsk->signal->real_timer);
803 exit_itimers(tsk->signal); 813 exit_itimers(tsk->signal);
814#endif
804 if (tsk->mm) 815 if (tsk->mm)
805 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); 816 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
806 } 817 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ffa16033ded..5957cf8b4c4b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1347,8 +1347,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1347 seqlock_init(&sig->stats_lock); 1347 seqlock_init(&sig->stats_lock);
1348 prev_cputime_init(&sig->prev_cputime); 1348 prev_cputime_init(&sig->prev_cputime);
1349 1349
1350#ifdef CONFIG_POSIX_TIMERS
1350 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1351 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1351 sig->real_timer.function = it_real_fn; 1352 sig->real_timer.function = it_real_fn;
1353#endif
1352 1354
1353 task_lock(current->group_leader); 1355 task_lock(current->group_leader);
1354 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 1356 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
diff --git a/kernel/signal.c b/kernel/signal.c
index 75761acc77cf..29a410780aa9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -427,6 +427,7 @@ void flush_signals(struct task_struct *t)
427 spin_unlock_irqrestore(&t->sighand->siglock, flags); 427 spin_unlock_irqrestore(&t->sighand->siglock, flags);
428} 428}
429 429
430#ifdef CONFIG_POSIX_TIMERS
430static void __flush_itimer_signals(struct sigpending *pending) 431static void __flush_itimer_signals(struct sigpending *pending)
431{ 432{
432 sigset_t signal, retain; 433 sigset_t signal, retain;
@@ -460,6 +461,7 @@ void flush_itimer_signals(void)
460 __flush_itimer_signals(&tsk->signal->shared_pending); 461 __flush_itimer_signals(&tsk->signal->shared_pending);
461 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 462 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
462} 463}
464#endif
463 465
464void ignore_signals(struct task_struct *t) 466void ignore_signals(struct task_struct *t)
465{ 467{
@@ -567,6 +569,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
567 if (!signr) { 569 if (!signr) {
568 signr = __dequeue_signal(&tsk->signal->shared_pending, 570 signr = __dequeue_signal(&tsk->signal->shared_pending,
569 mask, info); 571 mask, info);
572#ifdef CONFIG_POSIX_TIMERS
570 /* 573 /*
571 * itimer signal ? 574 * itimer signal ?
572 * 575 *
@@ -590,6 +593,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
590 hrtimer_restart(tmr); 593 hrtimer_restart(tmr);
591 } 594 }
592 } 595 }
596#endif
593 } 597 }
594 598
595 recalc_sigpending(); 599 recalc_sigpending();
@@ -611,6 +615,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
611 */ 615 */
612 current->jobctl |= JOBCTL_STOP_DEQUEUED; 616 current->jobctl |= JOBCTL_STOP_DEQUEUED;
613 } 617 }
618#ifdef CONFIG_POSIX_TIMERS
614 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 619 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
615 /* 620 /*
616 * Release the siglock to ensure proper locking order 621 * Release the siglock to ensure proper locking order
@@ -622,6 +627,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
622 do_schedule_next_timer(info); 627 do_schedule_next_timer(info);
623 spin_lock(&tsk->sighand->siglock); 628 spin_lock(&tsk->sighand->siglock);
624 } 629 }
630#endif
625 return signr; 631 return signr;
626} 632}
627 633
diff --git a/kernel/sys.c b/kernel/sys.c
index 89d5be418157..78c9fb7dd680 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1416,7 +1416,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
1416 * applications, so we live with it 1416 * applications, so we live with it
1417 */ 1417 */
1418 if (!retval && new_rlim && resource == RLIMIT_CPU && 1418 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1419 new_rlim->rlim_cur != RLIM_INFINITY) 1419 new_rlim->rlim_cur != RLIM_INFINITY &&
1420 IS_ENABLED(CONFIG_POSIX_TIMERS))
1420 update_rlimit_cpu(tsk, new_rlim->rlim_cur); 1421 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1421out: 1422out:
1422 read_unlock(&tasklist_lock); 1423 read_unlock(&tasklist_lock);
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 49eca0beed32..976840d29a71 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,6 +1,12 @@
1obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o 1obj-y += time.o timer.o hrtimer.o
2obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o 2obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
3obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o 3obj-y += timeconv.o timecounter.o alarmtimer.o
4
5ifeq ($(CONFIG_POSIX_TIMERS),y)
6 obj-y += posix-timers.o posix-cpu-timers.o posix-clock.o itimer.o
7else
8 obj-y += posix-stubs.o
9endif
4 10
5obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o tick-common.o 11obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o tick-common.o
6ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y) 12ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 12dd190634ab..9b08ca391aed 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -26,6 +26,9 @@
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/freezer.h> 27#include <linux/freezer.h>
28 28
29#define CREATE_TRACE_POINTS
30#include <trace/events/alarmtimer.h>
31
29/** 32/**
30 * struct alarm_base - Alarm timer bases 33 * struct alarm_base - Alarm timer bases
31 * @lock: Lock for syncrhonized access to the base 34 * @lock: Lock for syncrhonized access to the base
@@ -40,7 +43,9 @@ static struct alarm_base {
40 clockid_t base_clockid; 43 clockid_t base_clockid;
41} alarm_bases[ALARM_NUMTYPE]; 44} alarm_bases[ALARM_NUMTYPE];
42 45
43/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ 46/* freezer information to handle clock_nanosleep triggered wakeups */
47static enum alarmtimer_type freezer_alarmtype;
48static ktime_t freezer_expires;
44static ktime_t freezer_delta; 49static ktime_t freezer_delta;
45static DEFINE_SPINLOCK(freezer_delta_lock); 50static DEFINE_SPINLOCK(freezer_delta_lock);
46 51
@@ -194,6 +199,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
194 } 199 }
195 spin_unlock_irqrestore(&base->lock, flags); 200 spin_unlock_irqrestore(&base->lock, flags);
196 201
202 trace_alarmtimer_fired(alarm, base->gettime());
197 return ret; 203 return ret;
198 204
199} 205}
@@ -218,15 +224,16 @@ EXPORT_SYMBOL_GPL(alarm_expires_remaining);
218 */ 224 */
219static int alarmtimer_suspend(struct device *dev) 225static int alarmtimer_suspend(struct device *dev)
220{ 226{
221 struct rtc_time tm; 227 ktime_t min, now, expires;
222 ktime_t min, now; 228 int i, ret, type;
223 unsigned long flags;
224 struct rtc_device *rtc; 229 struct rtc_device *rtc;
225 int i; 230 unsigned long flags;
226 int ret; 231 struct rtc_time tm;
227 232
228 spin_lock_irqsave(&freezer_delta_lock, flags); 233 spin_lock_irqsave(&freezer_delta_lock, flags);
229 min = freezer_delta; 234 min = freezer_delta;
235 expires = freezer_expires;
236 type = freezer_alarmtype;
230 freezer_delta = ktime_set(0, 0); 237 freezer_delta = ktime_set(0, 0);
231 spin_unlock_irqrestore(&freezer_delta_lock, flags); 238 spin_unlock_irqrestore(&freezer_delta_lock, flags);
232 239
@@ -247,8 +254,11 @@ static int alarmtimer_suspend(struct device *dev)
247 if (!next) 254 if (!next)
248 continue; 255 continue;
249 delta = ktime_sub(next->expires, base->gettime()); 256 delta = ktime_sub(next->expires, base->gettime());
250 if (!min.tv64 || (delta.tv64 < min.tv64)) 257 if (!min.tv64 || (delta.tv64 < min.tv64)) {
258 expires = next->expires;
251 min = delta; 259 min = delta;
260 type = i;
261 }
252 } 262 }
253 if (min.tv64 == 0) 263 if (min.tv64 == 0)
254 return 0; 264 return 0;
@@ -258,6 +268,8 @@ static int alarmtimer_suspend(struct device *dev)
258 return -EBUSY; 268 return -EBUSY;
259 } 269 }
260 270
271 trace_alarmtimer_suspend(expires, type);
272
261 /* Setup an rtc timer to fire that far in the future */ 273 /* Setup an rtc timer to fire that far in the future */
262 rtc_timer_cancel(rtc, &rtctimer); 274 rtc_timer_cancel(rtc, &rtctimer);
263 rtc_read_time(rtc, &tm); 275 rtc_read_time(rtc, &tm);
@@ -295,15 +307,32 @@ static int alarmtimer_resume(struct device *dev)
295 307
296static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) 308static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
297{ 309{
298 ktime_t delta; 310 struct alarm_base *base;
299 unsigned long flags; 311 unsigned long flags;
300 struct alarm_base *base = &alarm_bases[type]; 312 ktime_t delta;
313
314 switch(type) {
315 case ALARM_REALTIME:
316 base = &alarm_bases[ALARM_REALTIME];
317 type = ALARM_REALTIME_FREEZER;
318 break;
319 case ALARM_BOOTTIME:
320 base = &alarm_bases[ALARM_BOOTTIME];
321 type = ALARM_BOOTTIME_FREEZER;
322 break;
323 default:
324 WARN_ONCE(1, "Invalid alarm type: %d\n", type);
325 return;
326 }
301 327
302 delta = ktime_sub(absexp, base->gettime()); 328 delta = ktime_sub(absexp, base->gettime());
303 329
304 spin_lock_irqsave(&freezer_delta_lock, flags); 330 spin_lock_irqsave(&freezer_delta_lock, flags);
305 if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) 331 if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) {
306 freezer_delta = delta; 332 freezer_delta = delta;
333 freezer_expires = absexp;
334 freezer_alarmtype = type;
335 }
307 spin_unlock_irqrestore(&freezer_delta_lock, flags); 336 spin_unlock_irqrestore(&freezer_delta_lock, flags);
308} 337}
309 338
@@ -342,6 +371,8 @@ void alarm_start(struct alarm *alarm, ktime_t start)
342 alarmtimer_enqueue(base, alarm); 371 alarmtimer_enqueue(base, alarm);
343 hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS); 372 hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
344 spin_unlock_irqrestore(&base->lock, flags); 373 spin_unlock_irqrestore(&base->lock, flags);
374
375 trace_alarmtimer_start(alarm, base->gettime());
345} 376}
346EXPORT_SYMBOL_GPL(alarm_start); 377EXPORT_SYMBOL_GPL(alarm_start);
347 378
@@ -390,6 +421,8 @@ int alarm_try_to_cancel(struct alarm *alarm)
390 if (ret >= 0) 421 if (ret >= 0)
391 alarmtimer_dequeue(base, alarm); 422 alarmtimer_dequeue(base, alarm);
392 spin_unlock_irqrestore(&base->lock, flags); 423 spin_unlock_irqrestore(&base->lock, flags);
424
425 trace_alarmtimer_cancel(alarm, base->gettime());
393 return ret; 426 return ret;
394} 427}
395EXPORT_SYMBOL_GPL(alarm_try_to_cancel); 428EXPORT_SYMBOL_GPL(alarm_try_to_cancel);
@@ -846,8 +879,10 @@ static int __init alarmtimer_init(void)
846 879
847 alarmtimer_rtc_timer_init(); 880 alarmtimer_rtc_timer_init();
848 881
849 posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); 882 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
850 posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); 883 posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
884 posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
885 }
851 886
852 /* Initialize alarm bases */ 887 /* Initialize alarm bases */
853 alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME; 888 alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bb5ec425dfe0..08be5c99d26b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1742,15 +1742,19 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
1742 * You can set the task state as follows - 1742 * You can set the task state as follows -
1743 * 1743 *
1744 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to 1744 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1745 * pass before the routine returns. 1745 * pass before the routine returns unless the current task is explicitly
1746 * woken up, (e.g. by wake_up_process()).
1746 * 1747 *
1747 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 1748 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1748 * delivered to the current task. 1749 * delivered to the current task or the current task is explicitly woken
1750 * up.
1749 * 1751 *
1750 * The current task state is guaranteed to be TASK_RUNNING when this 1752 * The current task state is guaranteed to be TASK_RUNNING when this
1751 * routine returns. 1753 * routine returns.
1752 * 1754 *
1753 * Returns 0 when the timer has expired otherwise -EINTR 1755 * Returns 0 when the timer has expired. If the task was woken before the
1756 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
1757 * by an explicit wakeup, it returns -EINTR.
1754 */ 1758 */
1755int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta, 1759int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
1756 const enum hrtimer_mode mode) 1760 const enum hrtimer_mode mode)
@@ -1772,15 +1776,19 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
1772 * You can set the task state as follows - 1776 * You can set the task state as follows -
1773 * 1777 *
1774 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to 1778 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1775 * pass before the routine returns. 1779 * pass before the routine returns unless the current task is explicitly
1780 * woken up, (e.g. by wake_up_process()).
1776 * 1781 *
1777 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 1782 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1778 * delivered to the current task. 1783 * delivered to the current task or the current task is explicitly woken
1784 * up.
1779 * 1785 *
1780 * The current task state is guaranteed to be TASK_RUNNING when this 1786 * The current task state is guaranteed to be TASK_RUNNING when this
1781 * routine returns. 1787 * routine returns.
1782 * 1788 *
1783 * Returns 0 when the timer has expired otherwise -EINTR 1789 * Returns 0 when the timer has expired. If the task was woken before the
1790 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
1791 * by an explicit wakeup, it returns -EINTR.
1784 */ 1792 */
1785int __sched schedule_hrtimeout(ktime_t *expires, 1793int __sched schedule_hrtimeout(ktime_t *expires,
1786 const enum hrtimer_mode mode) 1794 const enum hrtimer_mode mode)
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 1d5c7204ddc9..2b9f45bc955d 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -238,6 +238,8 @@ again:
238 return 0; 238 return 0;
239} 239}
240 240
241#ifdef __ARCH_WANT_SYS_ALARM
242
241/** 243/**
242 * alarm_setitimer - set alarm in seconds 244 * alarm_setitimer - set alarm in seconds
243 * 245 *
@@ -250,7 +252,7 @@ again:
250 * On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid 252 * On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid
251 * negative timeval settings which would cause immediate expiry. 253 * negative timeval settings which would cause immediate expiry.
252 */ 254 */
253unsigned int alarm_setitimer(unsigned int seconds) 255static unsigned int alarm_setitimer(unsigned int seconds)
254{ 256{
255 struct itimerval it_new, it_old; 257 struct itimerval it_new, it_old;
256 258
@@ -275,6 +277,17 @@ unsigned int alarm_setitimer(unsigned int seconds)
275 return it_old.it_value.tv_sec; 277 return it_old.it_value.tv_sec;
276} 278}
277 279
280/*
281 * For backwards compatibility? This can be done in libc so Alpha
282 * and all newer ports shouldn't need it.
283 */
284SYSCALL_DEFINE1(alarm, unsigned int, seconds)
285{
286 return alarm_setitimer(seconds);
287}
288
289#endif
290
278SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value, 291SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
279 struct itimerval __user *, ovalue) 292 struct itimerval __user *, ovalue)
280{ 293{
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e887ffc8eef3..f246763c9947 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -9,7 +9,6 @@
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
11#include <trace/events/timer.h> 11#include <trace/events/timer.h>
12#include <linux/random.h>
13#include <linux/tick.h> 12#include <linux/tick.h>
14#include <linux/workqueue.h> 13#include <linux/workqueue.h>
15 14
@@ -447,10 +446,7 @@ static void cleanup_timers(struct list_head *head)
447 */ 446 */
448void posix_cpu_timers_exit(struct task_struct *tsk) 447void posix_cpu_timers_exit(struct task_struct *tsk)
449{ 448{
450 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
451 sizeof(unsigned long long));
452 cleanup_timers(tsk->cpu_timers); 449 cleanup_timers(tsk->cpu_timers);
453
454} 450}
455void posix_cpu_timers_exit_group(struct task_struct *tsk) 451void posix_cpu_timers_exit_group(struct task_struct *tsk)
456{ 452{
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
new file mode 100644
index 000000000000..cd6716e115e8
--- /dev/null
+++ b/kernel/time/posix-stubs.c
@@ -0,0 +1,123 @@
1/*
2 * Dummy stubs used when CONFIG_POSIX_TIMERS=n
3 *
4 * Created by: Nicolas Pitre, July 2016
5 * Copyright: (C) 2016 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/linkage.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/errno.h>
16#include <linux/syscalls.h>
17#include <linux/ktime.h>
18#include <linux/timekeeping.h>
19#include <linux/posix-timers.h>
20
21asmlinkage long sys_ni_posix_timers(void)
22{
23 pr_err_once("process %d (%s) attempted a POSIX timer syscall "
24 "while CONFIG_POSIX_TIMERS is not set\n",
25 current->pid, current->comm);
26 return -ENOSYS;
27}
28
29#define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
30
31SYS_NI(timer_create);
32SYS_NI(timer_gettime);
33SYS_NI(timer_getoverrun);
34SYS_NI(timer_settime);
35SYS_NI(timer_delete);
36SYS_NI(clock_adjtime);
37SYS_NI(getitimer);
38SYS_NI(setitimer);
39#ifdef __ARCH_WANT_SYS_ALARM
40SYS_NI(alarm);
41#endif
42
43/*
44 * We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC
45 * as it is easy to remain compatible with little code. CLOCK_BOOTTIME
46 * is also included for convenience as at least systemd uses it.
47 */
48
49SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
50 const struct timespec __user *, tp)
51{
52 struct timespec new_tp;
53
54 if (which_clock != CLOCK_REALTIME)
55 return -EINVAL;
56 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
57 return -EFAULT;
58 return do_sys_settimeofday(&new_tp, NULL);
59}
60
61SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
62 struct timespec __user *,tp)
63{
64 struct timespec kernel_tp;
65
66 switch (which_clock) {
67 case CLOCK_REALTIME: ktime_get_real_ts(&kernel_tp); break;
68 case CLOCK_MONOTONIC: ktime_get_ts(&kernel_tp); break;
69 case CLOCK_BOOTTIME: get_monotonic_boottime(&kernel_tp); break;
70 default: return -EINVAL;
71 }
72 if (copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
73 return -EFAULT;
74 return 0;
75}
76
77SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct timespec __user *, tp)
78{
79 struct timespec rtn_tp = {
80 .tv_sec = 0,
81 .tv_nsec = hrtimer_resolution,
82 };
83
84 switch (which_clock) {
85 case CLOCK_REALTIME:
86 case CLOCK_MONOTONIC:
87 case CLOCK_BOOTTIME:
88 if (copy_to_user(tp, &rtn_tp, sizeof(rtn_tp)))
89 return -EFAULT;
90 return 0;
91 default:
92 return -EINVAL;
93 }
94}
95
96SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
97 const struct timespec __user *, rqtp,
98 struct timespec __user *, rmtp)
99{
100 struct timespec t;
101
102 switch (which_clock) {
103 case CLOCK_REALTIME:
104 case CLOCK_MONOTONIC:
105 case CLOCK_BOOTTIME:
106 if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
107 return -EFAULT;
108 if (!timespec_valid(&t))
109 return -EINVAL;
110 return hrtimer_nanosleep(&t, rmtp, flags & TIMER_ABSTIME ?
111 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
112 which_clock);
113 default:
114 return -EINVAL;
115 }
116}
117
118#ifdef CONFIG_COMPAT
119long clock_nanosleep_restart(struct restart_block *restart_block)
120{
121 return hrtimer_nanosleep_restart(restart_block);
122}
123#endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 37dec7e3db43..da233cdf89b0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -258,10 +258,9 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
258 tk->cycle_interval = interval; 258 tk->cycle_interval = interval;
259 259
260 /* Go back from cycles -> shifted ns */ 260 /* Go back from cycles -> shifted ns */
261 tk->xtime_interval = (u64) interval * clock->mult; 261 tk->xtime_interval = interval * clock->mult;
262 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 262 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
263 tk->raw_interval = 263 tk->raw_interval = (interval * clock->mult) >> clock->shift;
264 ((u64) interval * clock->mult) >> clock->shift;
265 264
266 /* if changing clocks, convert xtime_nsec shift units */ 265 /* if changing clocks, convert xtime_nsec shift units */
267 if (old_clock) { 266 if (old_clock) {
@@ -299,10 +298,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
299static inline u32 arch_gettimeoffset(void) { return 0; } 298static inline u32 arch_gettimeoffset(void) { return 0; }
300#endif 299#endif
301 300
302static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr, 301static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
303 cycle_t delta) 302 cycle_t delta)
304{ 303{
305 s64 nsec; 304 u64 nsec;
306 305
307 nsec = delta * tkr->mult + tkr->xtime_nsec; 306 nsec = delta * tkr->mult + tkr->xtime_nsec;
308 nsec >>= tkr->shift; 307 nsec >>= tkr->shift;
@@ -311,7 +310,7 @@ static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
311 return nsec + arch_gettimeoffset(); 310 return nsec + arch_gettimeoffset();
312} 311}
313 312
314static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) 313static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
315{ 314{
316 cycle_t delta; 315 cycle_t delta;
317 316
@@ -319,8 +318,8 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
319 return timekeeping_delta_to_ns(tkr, delta); 318 return timekeeping_delta_to_ns(tkr, delta);
320} 319}
321 320
322static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, 321static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
323 cycle_t cycles) 322 cycle_t cycles)
324{ 323{
325 cycle_t delta; 324 cycle_t delta;
326 325
@@ -425,6 +424,35 @@ u64 ktime_get_raw_fast_ns(void)
425} 424}
426EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); 425EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
427 426
427/**
428 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
429 *
430 * To keep it NMI safe since we're accessing from tracing, we're not using a
431 * separate timekeeper with updates to monotonic clock and boot offset
432 * protected with seqlocks. This has the following minor side effects:
433 *
434 * (1) Its possible that a timestamp be taken after the boot offset is updated
435 * but before the timekeeper is updated. If this happens, the new boot offset
436 * is added to the old timekeeping making the clock appear to update slightly
437 * earlier:
438 * CPU 0 CPU 1
439 * timekeeping_inject_sleeptime64()
440 * __timekeeping_inject_sleeptime(tk, delta);
441 * timestamp();
442 * timekeeping_update(tk, TK_CLEAR_NTP...);
443 *
444 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
445 * partially updated. Since the tk->offs_boot update is a rare event, this
446 * should be a rare occurrence which postprocessing should be able to handle.
447 */
448u64 notrace ktime_get_boot_fast_ns(void)
449{
450 struct timekeeper *tk = &tk_core.timekeeper;
451
452 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
453}
454EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
455
428/* Suspend-time cycles value for halted fast timekeeper. */ 456/* Suspend-time cycles value for halted fast timekeeper. */
429static cycle_t cycles_at_suspend; 457static cycle_t cycles_at_suspend;
430 458
@@ -623,7 +651,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
623{ 651{
624 struct clocksource *clock = tk->tkr_mono.clock; 652 struct clocksource *clock = tk->tkr_mono.clock;
625 cycle_t cycle_now, delta; 653 cycle_t cycle_now, delta;
626 s64 nsec; 654 u64 nsec;
627 655
628 cycle_now = tk->tkr_mono.read(clock); 656 cycle_now = tk->tkr_mono.read(clock);
629 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 657 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -652,7 +680,7 @@ int __getnstimeofday64(struct timespec64 *ts)
652{ 680{
653 struct timekeeper *tk = &tk_core.timekeeper; 681 struct timekeeper *tk = &tk_core.timekeeper;
654 unsigned long seq; 682 unsigned long seq;
655 s64 nsecs = 0; 683 u64 nsecs;
656 684
657 do { 685 do {
658 seq = read_seqcount_begin(&tk_core.seq); 686 seq = read_seqcount_begin(&tk_core.seq);
@@ -692,7 +720,7 @@ ktime_t ktime_get(void)
692 struct timekeeper *tk = &tk_core.timekeeper; 720 struct timekeeper *tk = &tk_core.timekeeper;
693 unsigned int seq; 721 unsigned int seq;
694 ktime_t base; 722 ktime_t base;
695 s64 nsecs; 723 u64 nsecs;
696 724
697 WARN_ON(timekeeping_suspended); 725 WARN_ON(timekeeping_suspended);
698 726
@@ -735,7 +763,7 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
735 struct timekeeper *tk = &tk_core.timekeeper; 763 struct timekeeper *tk = &tk_core.timekeeper;
736 unsigned int seq; 764 unsigned int seq;
737 ktime_t base, *offset = offsets[offs]; 765 ktime_t base, *offset = offsets[offs];
738 s64 nsecs; 766 u64 nsecs;
739 767
740 WARN_ON(timekeeping_suspended); 768 WARN_ON(timekeeping_suspended);
741 769
@@ -779,7 +807,7 @@ ktime_t ktime_get_raw(void)
779 struct timekeeper *tk = &tk_core.timekeeper; 807 struct timekeeper *tk = &tk_core.timekeeper;
780 unsigned int seq; 808 unsigned int seq;
781 ktime_t base; 809 ktime_t base;
782 s64 nsecs; 810 u64 nsecs;
783 811
784 do { 812 do {
785 seq = read_seqcount_begin(&tk_core.seq); 813 seq = read_seqcount_begin(&tk_core.seq);
@@ -804,8 +832,8 @@ void ktime_get_ts64(struct timespec64 *ts)
804{ 832{
805 struct timekeeper *tk = &tk_core.timekeeper; 833 struct timekeeper *tk = &tk_core.timekeeper;
806 struct timespec64 tomono; 834 struct timespec64 tomono;
807 s64 nsec;
808 unsigned int seq; 835 unsigned int seq;
836 u64 nsec;
809 837
810 WARN_ON(timekeeping_suspended); 838 WARN_ON(timekeeping_suspended);
811 839
@@ -893,8 +921,8 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
893 unsigned long seq; 921 unsigned long seq;
894 ktime_t base_raw; 922 ktime_t base_raw;
895 ktime_t base_real; 923 ktime_t base_real;
896 s64 nsec_raw; 924 u64 nsec_raw;
897 s64 nsec_real; 925 u64 nsec_real;
898 cycle_t now; 926 cycle_t now;
899 927
900 WARN_ON_ONCE(timekeeping_suspended); 928 WARN_ON_ONCE(timekeeping_suspended);
@@ -1052,7 +1080,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
1052 cycle_t cycles, now, interval_start; 1080 cycle_t cycles, now, interval_start;
1053 unsigned int clock_was_set_seq = 0; 1081 unsigned int clock_was_set_seq = 0;
1054 ktime_t base_real, base_raw; 1082 ktime_t base_real, base_raw;
1055 s64 nsec_real, nsec_raw; 1083 u64 nsec_real, nsec_raw;
1056 u8 cs_was_changed_seq; 1084 u8 cs_was_changed_seq;
1057 unsigned long seq; 1085 unsigned long seq;
1058 bool do_interp; 1086 bool do_interp;
@@ -1365,7 +1393,7 @@ void getrawmonotonic64(struct timespec64 *ts)
1365 struct timekeeper *tk = &tk_core.timekeeper; 1393 struct timekeeper *tk = &tk_core.timekeeper;
1366 struct timespec64 ts64; 1394 struct timespec64 ts64;
1367 unsigned long seq; 1395 unsigned long seq;
1368 s64 nsecs; 1396 u64 nsecs;
1369 1397
1370 do { 1398 do {
1371 seq = read_seqcount_begin(&tk_core.seq); 1399 seq = read_seqcount_begin(&tk_core.seq);
@@ -1616,7 +1644,7 @@ void timekeeping_resume(void)
1616 struct clocksource *clock = tk->tkr_mono.clock; 1644 struct clocksource *clock = tk->tkr_mono.clock;
1617 unsigned long flags; 1645 unsigned long flags;
1618 struct timespec64 ts_new, ts_delta; 1646 struct timespec64 ts_new, ts_delta;
1619 cycle_t cycle_now, cycle_delta; 1647 cycle_t cycle_now;
1620 1648
1621 sleeptime_injected = false; 1649 sleeptime_injected = false;
1622 read_persistent_clock64(&ts_new); 1650 read_persistent_clock64(&ts_new);
@@ -1642,27 +1670,11 @@ void timekeeping_resume(void)
1642 cycle_now = tk->tkr_mono.read(clock); 1670 cycle_now = tk->tkr_mono.read(clock);
1643 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1671 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1644 cycle_now > tk->tkr_mono.cycle_last) { 1672 cycle_now > tk->tkr_mono.cycle_last) {
1645 u64 num, max = ULLONG_MAX; 1673 u64 nsec, cyc_delta;
1646 u32 mult = clock->mult;
1647 u32 shift = clock->shift;
1648 s64 nsec = 0;
1649
1650 cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1651 tk->tkr_mono.mask);
1652
1653 /*
1654 * "cycle_delta * mutl" may cause 64 bits overflow, if the
1655 * suspended time is too long. In that case we need do the
1656 * 64 bits math carefully
1657 */
1658 do_div(max, mult);
1659 if (cycle_delta > max) {
1660 num = div64_u64(cycle_delta, max);
1661 nsec = (((u64) max * mult) >> shift) * num;
1662 cycle_delta -= num * max;
1663 }
1664 nsec += ((u64) cycle_delta * mult) >> shift;
1665 1674
1675 cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1676 tk->tkr_mono.mask);
1677 nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
1666 ts_delta = ns_to_timespec64(nsec); 1678 ts_delta = ns_to_timespec64(nsec);
1667 sleeptime_injected = true; 1679 sleeptime_injected = true;
1668 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { 1680 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c611c47de884..ea4fbf8477a9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1615,7 +1615,8 @@ void update_process_times(int user_tick)
1615 irq_work_tick(); 1615 irq_work_tick();
1616#endif 1616#endif
1617 scheduler_tick(); 1617 scheduler_tick();
1618 run_posix_cpu_timers(p); 1618 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
1619 run_posix_cpu_timers(p);
1619} 1620}
1620 1621
1621/** 1622/**
@@ -1676,19 +1677,6 @@ void run_local_timers(void)
1676 raise_softirq(TIMER_SOFTIRQ); 1677 raise_softirq(TIMER_SOFTIRQ);
1677} 1678}
1678 1679
1679#ifdef __ARCH_WANT_SYS_ALARM
1680
1681/*
1682 * For backwards compatibility? This can be done in libc so Alpha
1683 * and all newer ports shouldn't need it.
1684 */
1685SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1686{
1687 return alarm_setitimer(seconds);
1688}
1689
1690#endif
1691
1692static void process_timeout(unsigned long __data) 1680static void process_timeout(unsigned long __data)
1693{ 1681{
1694 wake_up_process((struct task_struct *)__data); 1682 wake_up_process((struct task_struct *)__data);
@@ -1705,11 +1693,12 @@ static void process_timeout(unsigned long __data)
1705 * You can set the task state as follows - 1693 * You can set the task state as follows -
1706 * 1694 *
1707 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to 1695 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1708 * pass before the routine returns. The routine will return 0 1696 * pass before the routine returns unless the current task is explicitly
1697 * woken up (e.g. by wake_up_process()).
1709 * 1698 *
1710 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 1699 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1711 * delivered to the current task. In this case the remaining time 1700 * delivered to the current task or the current task is explicitly woken
1712 * in jiffies will be returned, or 0 if the timer expired in time 1701 * up.
1713 * 1702 *
1714 * The current task state is guaranteed to be TASK_RUNNING when this 1703 * The current task state is guaranteed to be TASK_RUNNING when this
1715 * routine returns. 1704 * routine returns.
@@ -1718,7 +1707,9 @@ static void process_timeout(unsigned long __data)
1718 * the CPU away without a bound on the timeout. In this case the return 1707 * the CPU away without a bound on the timeout. In this case the return
1719 * value will be %MAX_SCHEDULE_TIMEOUT. 1708 * value will be %MAX_SCHEDULE_TIMEOUT.
1720 * 1709 *
1721 * In all cases the return value is guaranteed to be non-negative. 1710 * Returns 0 when the timer has expired otherwise the remaining time in
1711 * jiffies will be returned. In all cases the return value is guaranteed
1712 * to be non-negative.
1722 */ 1713 */
1723signed long __sched schedule_timeout(signed long timeout) 1714signed long __sched schedule_timeout(signed long timeout)
1724{ 1715{
@@ -1910,16 +1901,6 @@ unsigned long msleep_interruptible(unsigned int msecs)
1910 1901
1911EXPORT_SYMBOL(msleep_interruptible); 1902EXPORT_SYMBOL(msleep_interruptible);
1912 1903
1913static void __sched do_usleep_range(unsigned long min, unsigned long max)
1914{
1915 ktime_t kmin;
1916 u64 delta;
1917
1918 kmin = ktime_set(0, min * NSEC_PER_USEC);
1919 delta = (u64)(max - min) * NSEC_PER_USEC;
1920 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1921}
1922
1923/** 1904/**
1924 * usleep_range - Sleep for an approximate time 1905 * usleep_range - Sleep for an approximate time
1925 * @min: Minimum time in usecs to sleep 1906 * @min: Minimum time in usecs to sleep
@@ -1933,7 +1914,14 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
1933 */ 1914 */
1934void __sched usleep_range(unsigned long min, unsigned long max) 1915void __sched usleep_range(unsigned long min, unsigned long max)
1935{ 1916{
1936 __set_current_state(TASK_UNINTERRUPTIBLE); 1917 ktime_t exp = ktime_add_us(ktime_get(), min);
1937 do_usleep_range(min, max); 1918 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
1919
1920 for (;;) {
1921 __set_current_state(TASK_UNINTERRUPTIBLE);
1922 /* Do not return before the requested sleep time has elapsed */
1923 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
1924 break;
1925 }
1938} 1926}
1939EXPORT_SYMBOL(usleep_range); 1927EXPORT_SYMBOL(usleep_range);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 465d56febc5b..54d5270a5042 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1125,6 +1125,7 @@ static struct {
1125 { trace_clock, "perf", 1 }, 1125 { trace_clock, "perf", 1 },
1126 { ktime_get_mono_fast_ns, "mono", 1 }, 1126 { ktime_get_mono_fast_ns, "mono", 1 },
1127 { ktime_get_raw_fast_ns, "mono_raw", 1 }, 1127 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1128 { ktime_get_boot_fast_ns, "boot", 1 },
1128 ARCH_TRACE_CLOCKS 1129 ARCH_TRACE_CLOCKS
1129}; 1130};
1130 1131