-rw-r--r--  arch/um/kernel/time.c           7
-rw-r--r--  include/linux/hrtimer.h         7
-rw-r--r--  include/linux/posix-timers.h   37
-rw-r--r--  include/linux/time.h            3
-rw-r--r--  kernel/posix-timers.c         717
5 files changed, 149 insertions, 622 deletions
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 11f518a7e156..8fa2ae7f3026 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -99,7 +99,8 @@ void uml_idle_timer(void)
 	set_interval(ITIMER_REAL);
 }
 
-extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
+extern void ktime_get_ts(struct timespec *ts);
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 
 void time_init(void)
 {
@@ -114,8 +115,8 @@ void time_init(void)
 	wall_to_monotonic.tv_nsec = -now.tv_nsec;
 }
 
-/* Declared in linux/time.h, which can't be included here */
-extern void clock_was_set(void);
+/* Defined in linux/ktimer.h, which can't be included here */
+#define clock_was_set() do { } while (0)
 
 void do_gettimeofday(struct timeval *tv)
 {
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2ac20b48b2f3..cf5cfdf8d613 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -93,6 +93,13 @@ struct hrtimer_base {
 	struct hrtimer		*curr_timer;
 };
 
+/*
+ * clock_was_set() is a NOP for non-high-resolution systems. The
+ * time-sorted order guarantees that a timer does not expire early and
+ * is expired in the next softirq when the clock was advanced.
+ */
+#define clock_was_set() do { } while (0)
+
 /* Exported timer functions: */
 
 /* Initialize timers: */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3c0a5beb7f0d..54faf5236da0 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -51,12 +51,8 @@ struct k_itimer {
 	struct sigqueue *sigq;		/* signal queue entry. */
 	union {
 		struct {
-			struct timer_list timer;
-			/* clock abs_timer_list: */
-			struct list_head abs_timer_entry;
-			/* wall_to_monotonic used when set: */
-			struct timespec wall_to_prev;
-			unsigned long incr;	/* interval in jiffies */
+			struct hrtimer timer;
+			ktime_t interval;
 		} real;
 		struct cpu_timer_list cpu;
 		struct {
@@ -68,15 +64,9 @@ struct k_itimer {
 	} it;
 };
 
-struct k_clock_abs {
-	struct list_head list;
-	spinlock_t lock;
-};
-
 struct k_clock {
 	int res;		/* in nanoseconds */
 	int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
-	struct k_clock_abs *abs_struct;
 	int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
 	int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
 	int (*timer_create) (struct k_itimer *timer);
@@ -102,29 +92,6 @@ int do_posix_clock_nosettime(const clockid_t, struct timespec *tp);
 /* function to call to trigger timer event */
 int posix_timer_event(struct k_itimer *timr, int si_private);
 
-struct now_struct {
-	unsigned long jiffies;
-};
-
-#define posix_get_now(now) \
-	do { (now)->jiffies = jiffies; } while (0)
-
-#define posix_time_before(timer, now) \
-	time_before((timer)->expires, (now)->jiffies)
-
-#define posix_bump_timer(timr, now)					\
-	do {								\
-		long delta, orun;					\
-									\
-		delta = (now).jiffies - (timr)->it.real.timer.expires;	\
-		if (delta >= 0) {					\
-			orun = 1 + (delta / (timr)->it.real.incr);	\
-			(timr)->it.real.timer.expires +=		\
-				orun * (timr)->it.real.incr;		\
-			(timr)->it_overrun += orun;			\
-		}							\
-	} while (0)
-
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts);
 int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts);
 int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts);
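
[Editorial aside: not part of the patch] The deleted posix_bump_timer macro is the arithmetic this whole conversion replaces: it advanced a jiffies expiry past "now" in whole intervals and accumulated the missed intervals as the POSIX overrun count. hrtimer_forward(), used throughout the new code, performs the same operation on ktime_t values and returns the overrun count instead of adding it in place. A minimal sketch of that arithmetic, with a hypothetical name and plain unsigned long values standing in for the kernel types:

	/*
	 * Sketch only: expires = 100, incr = 10, now = 135 gives
	 * delta = 35, orun = 1 + 35 / 10 = 4, new expires = 140 > now,
	 * and the caller adds 4 to it_overrun.
	 */
	static unsigned long bump_timer_sketch(unsigned long *expires,
					       unsigned long incr,
					       unsigned long now)
	{
		long delta = now - *expires;
		unsigned long orun = 0;

		if (incr && delta >= 0) {
			orun = 1 + delta / incr; /* missed intervals + current */
			*expires += orun * incr; /* first expiry after 'now' */
		}
		return orun;
	}
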
diff --git a/include/linux/time.h b/include/linux/time.h
index f639fde29253..1201155b2202 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -73,8 +73,7 @@ struct timespec current_kernel_time(void);
 extern void do_gettimeofday(struct timeval *tv);
 extern int do_settimeofday(struct timespec *tv);
 extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
-extern void clock_was_set(void);	// call whenever the clock is set
-extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 extern long do_utimes(char __user *filename, struct timeval *times);
 struct itimerval;
 extern int do_setitimer(int which, struct itimerval *value,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index ba900587b815..9e66e614862a 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -35,7 +35,6 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/time.h>
-#include <linux/calc64.h>
 
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
@@ -49,12 +48,6 @@
 #include <linux/workqueue.h>
 #include <linux/module.h>
 
-#define CLOCK_REALTIME_RES TICK_NSEC  /* In nano seconds. */
-
-static inline u64 mpy_l_X_l_ll(unsigned long mpy1,unsigned long mpy2)
-{
-	return (u64)mpy1 * mpy2;
-}
 /*
  * Management arrays for POSIX timers. Timers are kept in slab memory
  * Timer ids are allocated by an external routine that keeps track of the
@@ -140,18 +133,18 @@ static DEFINE_SPINLOCK(idr_lock);
  */
 
 static struct k_clock posix_clocks[MAX_CLOCKS];
+
 /*
- * We only have one real clock that can be set so we need only one abs list,
- * even if we should want to have several clocks with differing resolutions.
+ * These ones are defined below.
  */
-static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
-				       .lock = SPIN_LOCK_UNLOCKED};
+static int common_nsleep(const clockid_t, int flags, struct timespec *t,
+			 struct timespec __user *rmtp);
+static void common_timer_get(struct k_itimer *, struct itimerspec *);
+static int common_timer_set(struct k_itimer *, int,
+			    struct itimerspec *, struct itimerspec *);
+static int common_timer_del(struct k_itimer *timer);
 
-static void posix_timer_fn(unsigned long);
-static u64 do_posix_clock_monotonic_gettime_parts(
-	struct timespec *tp, struct timespec *mo);
-int do_posix_clock_monotonic_gettime(struct timespec *tp);
-static int do_posix_clock_monotonic_get(const clockid_t, struct timespec *tp);
+static int posix_timer_fn(void *data);
 
 static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
 
@@ -184,10 +177,12 @@ static inline int common_clock_getres(const clockid_t which_clock,
 	return 0;
 }
 
-static inline int common_clock_get(const clockid_t which_clock,
-				    struct timespec *tp)
+/*
+ * Get real time for posix timers
+ */
+static int common_clock_get(clockid_t which_clock, struct timespec *tp)
 {
-	getnstimeofday(tp);
+	ktime_get_real_ts(tp);
 	return 0;
 }
 
@@ -199,25 +194,14 @@ static inline int common_clock_set(const clockid_t which_clock,
 
 static inline int common_timer_create(struct k_itimer *new_timer)
 {
-	INIT_LIST_HEAD(&new_timer->it.real.abs_timer_entry);
-	init_timer(&new_timer->it.real.timer);
-	new_timer->it.real.timer.data = (unsigned long) new_timer;
+	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
+	new_timer->it.real.timer.data = new_timer;
 	new_timer->it.real.timer.function = posix_timer_fn;
 	return 0;
 }
 
 /*
- * These ones are defined below.
- */
-static int common_nsleep(const clockid_t, int flags, struct timespec *t,
-			 struct timespec __user *rmtp);
-static void common_timer_get(struct k_itimer *, struct itimerspec *);
-static int common_timer_set(struct k_itimer *, int,
-			    struct itimerspec *, struct itimerspec *);
-static int common_timer_del(struct k_itimer *timer);
-
-/*
- * Return nonzero iff we know a priori this clockid_t value is bogus.
+ * Return nonzero if we know a priori this clockid_t value is bogus.
  */
 static inline int invalid_clockid(const clockid_t which_clock)
 {
@@ -227,26 +211,32 @@ static inline int invalid_clockid(const clockid_t which_clock)
 		return 1;
 	if (posix_clocks[which_clock].clock_getres != NULL)
 		return 0;
-#ifndef CLOCK_DISPATCH_DIRECT
 	if (posix_clocks[which_clock].res != 0)
 		return 0;
-#endif
 	return 1;
 }
 
+/*
+ * Get monotonic time for posix timers
+ */
+static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
+{
+	ktime_get_ts(tp);
+	return 0;
+}
 
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
-	struct k_clock clock_realtime = {.res = CLOCK_REALTIME_RES,
-					 .abs_struct = &abs_list
-	};
-	struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
-					  .abs_struct = NULL,
-					  .clock_get = do_posix_clock_monotonic_get,
-					  .clock_set = do_posix_clock_nosettime
-	};
+	struct k_clock clock_realtime = {
+		.clock_getres = hrtimer_get_res,
+	};
+	struct k_clock clock_monotonic = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_ktime_get_ts,
+		.clock_set = do_posix_clock_nosettime,
+	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -260,117 +250,17 @@ static __init int init_posix_timers(void)
 
 __initcall(init_posix_timers);
 
-static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
-{
-	long sec = tp->tv_sec;
-	long nsec = tp->tv_nsec + res - 1;
-
-	if (nsec >= NSEC_PER_SEC) {
-		sec++;
-		nsec -= NSEC_PER_SEC;
-	}
-
-	/*
-	 * The scaling constants are defined in <linux/time.h>
-	 * The difference between there and here is that we do the
-	 * res rounding and compute a 64-bit result (well so does that
-	 * but it then throws away the high bits).
-	 */
-	*jiff = (mpy_l_X_l_ll(sec, SEC_CONVERSION) +
-		 (mpy_l_X_l_ll(nsec, NSEC_CONVERSION) >>
-		  (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-
-/*
- * This function adjusts the timer as needed as a result of the clock
- * being set. It should only be called for absolute timers, and then
- * under the abs_list lock. It computes the time difference and sets
- * the new jiffies value in the timer. It also updates the timers
- * reference wall_to_monotonic value. It is complicated by the fact
- * that tstojiffies() only handles positive times and it needs to work
- * with both positive and negative times. Also, for negative offsets,
- * we need to defeat the res round up.
- *
- * Return is true if there is a new time, else false.
- */
-static long add_clockset_delta(struct k_itimer *timr,
-			       struct timespec *new_wall_to)
-{
-	struct timespec delta;
-	int sign = 0;
-	u64 exp;
-
-	set_normalized_timespec(&delta,
-				new_wall_to->tv_sec -
-				timr->it.real.wall_to_prev.tv_sec,
-				new_wall_to->tv_nsec -
-				timr->it.real.wall_to_prev.tv_nsec);
-	if (likely(!(delta.tv_sec | delta.tv_nsec)))
-		return 0;
-	if (delta.tv_sec < 0) {
-		set_normalized_timespec(&delta,
-					-delta.tv_sec,
-					1 - delta.tv_nsec -
-					posix_clocks[timr->it_clock].res);
-		sign++;
-	}
-	tstojiffie(&delta, posix_clocks[timr->it_clock].res, &exp);
-	timr->it.real.wall_to_prev = *new_wall_to;
-	timr->it.real.timer.expires += (sign ? -exp : exp);
-	return 1;
-}
-
-static void remove_from_abslist(struct k_itimer *timr)
-{
-	if (!list_empty(&timr->it.real.abs_timer_entry)) {
-		spin_lock(&abs_list.lock);
-		list_del_init(&timr->it.real.abs_timer_entry);
-		spin_unlock(&abs_list.lock);
-	}
-}
-
 static void schedule_next_timer(struct k_itimer *timr)
 {
-	struct timespec new_wall_to;
-	struct now_struct now;
-	unsigned long seq;
-
-	/*
-	 * Set up the timer for the next interval (if there is one).
-	 * Note: this code uses the abs_timer_lock to protect
-	 * it.real.wall_to_prev and must hold it until exp is set, not exactly
-	 * obvious...
-
-	 * This function is used for CLOCK_REALTIME* and
-	 * CLOCK_MONOTONIC* timers. If we ever want to handle other
-	 * CLOCKs, the calling code (do_schedule_next_timer) would need
-	 * to pull the "clock" info from the timer and dispatch the
-	 * "other" CLOCKs "next timer" code (which, I suppose should
-	 * also be added to the k_clock structure).
-	 */
-	if (!timr->it.real.incr)
+	if (timr->it.real.interval.tv64 == 0)
 		return;
 
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		new_wall_to = wall_to_monotonic;
-		posix_get_now(&now);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	if (!list_empty(&timr->it.real.abs_timer_entry)) {
-		spin_lock(&abs_list.lock);
-		add_clockset_delta(timr, &new_wall_to);
-
-		posix_bump_timer(timr, now);
-
-		spin_unlock(&abs_list.lock);
-	} else {
-		posix_bump_timer(timr, now);
-	}
+	timr->it_overrun += hrtimer_forward(&timr->it.real.timer,
+					    timr->it.real.interval);
 	timr->it_overrun_last = timr->it_overrun;
 	timr->it_overrun = -1;
 	++timr->it_requeue_pending;
-	add_timer(&timr->it.real.timer);
+	hrtimer_restart(&timr->it.real.timer);
 }
 
 /*
@@ -391,31 +281,23 @@ void do_schedule_next_timer(struct siginfo *info)
 
 	timr = lock_timer(info->si_tid, &flags);
 
-	if (!timr || timr->it_requeue_pending != info->si_sys_private)
-		goto exit;
+	if (timr && timr->it_requeue_pending == info->si_sys_private) {
+		if (timr->it_clock < 0)
+			posix_cpu_timer_schedule(timr);
+		else
+			schedule_next_timer(timr);
 
-	if (timr->it_clock < 0)	/* CPU clock */
-		posix_cpu_timer_schedule(timr);
-	else
-		schedule_next_timer(timr);
-	info->si_overrun = timr->it_overrun_last;
-exit:
-	if (timr)
-		unlock_timer(timr, flags);
+		info->si_overrun = timr->it_overrun_last;
+	}
+
+	unlock_timer(timr, flags);
 }
 
 int posix_timer_event(struct k_itimer *timr,int si_private)
 {
 	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
 	timr->sigq->info.si_sys_private = si_private;
-	/*
-	 * Send signal to the process that owns this timer.
-
-	 * This code assumes that all the possible abs_lists share the
-	 * same lock (there is only one list at this time). If this is
-	 * not the case, the CLOCK info would need to be used to find
-	 * the proper abs list lock.
-	 */
+	/* Send signal to the process that owns this timer.*/
 
 	timr->sigq->info.si_signo = timr->it_sigev_signo;
 	timr->sigq->info.si_errno = 0;
@@ -449,64 +331,35 @@ EXPORT_SYMBOL_GPL(posix_timer_event);
 
  * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
  */
-static void posix_timer_fn(unsigned long __data)
+static int posix_timer_fn(void *data)
 {
-	struct k_itimer *timr = (struct k_itimer *) __data;
+	struct k_itimer *timr = data;
 	unsigned long flags;
-	unsigned long seq;
-	struct timespec delta, new_wall_to;
-	u64 exp = 0;
-	int do_notify = 1;
+	int si_private = 0;
+	int ret = HRTIMER_NORESTART;
 
 	spin_lock_irqsave(&timr->it_lock, flags);
-	if (!list_empty(&timr->it.real.abs_timer_entry)) {
-		spin_lock(&abs_list.lock);
-		do {
-			seq = read_seqbegin(&xtime_lock);
-			new_wall_to = wall_to_monotonic;
-		} while (read_seqretry(&xtime_lock, seq));
-		set_normalized_timespec(&delta,
-					new_wall_to.tv_sec -
-					timr->it.real.wall_to_prev.tv_sec,
-					new_wall_to.tv_nsec -
-					timr->it.real.wall_to_prev.tv_nsec);
-		if (likely((delta.tv_sec | delta.tv_nsec ) == 0)) {
-			/* do nothing, timer is on time */
-		} else if (delta.tv_sec < 0) {
-			/* do nothing, timer is already late */
-		} else {
-			/* timer is early due to a clock set */
-			tstojiffie(&delta,
-				   posix_clocks[timr->it_clock].res,
-				   &exp);
-			timr->it.real.wall_to_prev = new_wall_to;
-			timr->it.real.timer.expires += exp;
-			add_timer(&timr->it.real.timer);
-			do_notify = 0;
-		}
-		spin_unlock(&abs_list.lock);
 
-	}
-	if (do_notify) {
-		int si_private=0;
+	if (timr->it.real.interval.tv64 != 0)
+		si_private = ++timr->it_requeue_pending;
 
-		if (timr->it.real.incr)
-			si_private = ++timr->it_requeue_pending;
-		else {
-			remove_from_abslist(timr);
+	if (posix_timer_event(timr, si_private)) {
+		/*
+		 * signal was not sent because of sig_ignor
+		 * we will not get a call back to restart it AND
+		 * it should be restarted.
+		 */
+		if (timr->it.real.interval.tv64 != 0) {
+			timr->it_overrun +=
+				hrtimer_forward(&timr->it.real.timer,
+						timr->it.real.interval);
+			ret = HRTIMER_RESTART;
 		}
-
-		if (posix_timer_event(timr, si_private))
-			/*
-			 * signal was not sent because of sig_ignor
-			 * we will not get a call back to restart it AND
-			 * it should be restarted.
-			 */
-			schedule_next_timer(timr);
 	}
-	unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */
-}
 
+	unlock_timer(timr, flags);
+	return ret;
+}
 
 static inline struct task_struct * good_sigevent(sigevent_t * event)
 {
@@ -597,8 +450,7 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id,
-			    (void *) new_timer,
+	error = idr_get_new(&posix_timers_id, (void *) new_timer,
 			    &new_timer_id);
 	spin_unlock_irq(&idr_lock);
 	if (error == -EAGAIN)
@@ -699,26 +551,6 @@ out:
 }
 
 /*
- * good_timespec
- *
- * This function checks the elements of a timespec structure.
- *
- * Arguments:
- *	ts	: Pointer to the timespec structure to check
- *
- * Return value:
- * If a NULL pointer was passed in, or the tv_nsec field was less than 0
- * or greater than NSEC_PER_SEC, or the tv_sec field was less than 0,
- * this function returns 0. Otherwise it returns 1.
- */
-static int good_timespec(const struct timespec *ts)
-{
-	if ((!ts) || !timespec_valid(ts))
-		return 0;
-	return 1;
-}
-
-/*
  * Locking issues: We need to protect the result of the id look up until
  * we get the timer locked down so it is not deleted under us. The
  * removal is done under the idr spinlock so we use that here to bridge
@@ -770,39 +602,39 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 static void
 common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 {
-	unsigned long expires;
-	struct now_struct now;
-
-	do
-		expires = timr->it.real.timer.expires;
-	while ((volatile long) (timr->it.real.timer.expires) != expires);
-
-	posix_get_now(&now);
-
-	if (expires &&
-	    ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) &&
-	    !timr->it.real.incr &&
-	    posix_time_before(&timr->it.real.timer, &now))
-		timr->it.real.timer.expires = expires = 0;
-	if (expires) {
-		if (timr->it_requeue_pending & REQUEUE_PENDING ||
-		    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
-			posix_bump_timer(timr, now);
-			expires = timr->it.real.timer.expires;
-		}
-		else
-			if (!timer_pending(&timr->it.real.timer))
-				expires = 0;
-		if (expires)
-			expires -= now.jiffies;
-	}
-	jiffies_to_timespec(expires, &cur_setting->it_value);
-	jiffies_to_timespec(timr->it.real.incr, &cur_setting->it_interval);
+	ktime_t remaining;
+	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (cur_setting->it_value.tv_sec < 0) {
-		cur_setting->it_value.tv_nsec = 1;
-		cur_setting->it_value.tv_sec = 0;
+	memset(cur_setting, 0, sizeof(struct itimerspec));
+	remaining = hrtimer_get_remaining(timer);
+
+	/* Time left ? or timer pending */
+	if (remaining.tv64 > 0 || hrtimer_active(timer))
+		goto calci;
+	/* interval timer ? */
+	if (timr->it.real.interval.tv64 == 0)
+		return;
+	/*
+	 * When a requeue is pending or this is a SIGEV_NONE timer
+	 * move the expiry time forward by intervals, so expiry is >
+	 * now.
+	 */
+	if (timr->it_requeue_pending & REQUEUE_PENDING ||
+	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+		timr->it_overrun +=
+			hrtimer_forward(timer, timr->it.real.interval);
+		remaining = hrtimer_get_remaining(timer);
 	}
+ calci:
+	/* interval timer ? */
+	if (timr->it.real.interval.tv64 != 0)
+		cur_setting->it_interval =
+			ktime_to_timespec(timr->it.real.interval);
+	/* Return 0 only, when the timer is expired and not pending */
+	if (remaining.tv64 <= 0)
+		cur_setting->it_value.tv_nsec = 1;
+	else
+		cur_setting->it_value = ktime_to_timespec(remaining);
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
@@ -826,6 +658,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
 
 	return 0;
 }
+
 /*
  * Get the number of overruns of a POSIX.1b interval timer. This is to
  * be the overrun of the timer last delivered. At the same time we are
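
[Editorial aside: not part of the patch] The userspace view of the common_timer_get() rewrite above: timer_gettime() reports the remaining time (now derived from hrtimer_get_remaining()) and the interval. A self-contained sketch using only standard POSIX calls; link with -lrt on older glibc:

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		timer_t tid;
		struct sigevent sev = { .sigev_notify = SIGEV_NONE };
		struct itimerspec its = {
			.it_value    = { .tv_sec = 2 },	/* first expiry in 2s */
			.it_interval = { .tv_sec = 1 },	/* then every 1s */
		};

		if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
		    timer_settime(tid, 0, &its, NULL))
			return 1;

		/* Filled in the kernel by common_timer_get() */
		if (timer_gettime(tid, &its))
			return 1;
		printf("value %ld.%09lds, interval %ld.%09lds\n",
		       (long)its.it_value.tv_sec, its.it_value.tv_nsec,
		       (long)its.it_interval.tv_sec, its.it_interval.tv_nsec);
		return timer_delete(tid);
	}
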
@@ -835,7 +668,6 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
  * the call back to do_schedule_next_timer(). So all we need to do is
  * to pick up the frozen overrun.
  */
-
 asmlinkage long
 sys_timer_getoverrun(timer_t timer_id)
 {
@@ -852,84 +684,6 @@ sys_timer_getoverrun(timer_t timer_id)
 
 	return overrun;
 }
-/*
- * Adjust for absolute time
- *
- * If absolute time is given and it is not CLOCK_MONOTONIC, we need to
- * adjust for the offset between the timer clock (CLOCK_MONOTONIC) and
- * what ever clock he is using.
- *
- * If it is relative time, we need to add the current (CLOCK_MONOTONIC)
- * time to it to get the proper time for the timer.
- */
-static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
-			   int abs, u64 *exp, struct timespec *wall_to)
-{
-	struct timespec now;
-	struct timespec oc = *tp;
-	u64 jiffies_64_f;
-	int rtn =0;
-
-	if (abs) {
-		/*
-		 * The mask pick up the 4 basic clocks
-		 */
-		if (!((clock - &posix_clocks[0]) & ~CLOCKS_MASK)) {
-			jiffies_64_f = do_posix_clock_monotonic_gettime_parts(
-				&now, wall_to);
-			/*
-			 * If we are doing a MONOTONIC clock
-			 */
-			if((clock - &posix_clocks[0]) & CLOCKS_MONO){
-				now.tv_sec += wall_to->tv_sec;
-				now.tv_nsec += wall_to->tv_nsec;
-			}
-		} else {
-			/*
-			 * Not one of the basic clocks
-			 */
-			clock->clock_get(clock - posix_clocks, &now);
-			jiffies_64_f = get_jiffies_64();
-		}
-		/*
-		 * Take away now to get delta and normalize
-		 */
-		set_normalized_timespec(&oc, oc.tv_sec - now.tv_sec,
-					oc.tv_nsec - now.tv_nsec);
-	}else{
-		jiffies_64_f = get_jiffies_64();
-	}
-	/*
-	 * Check if the requested time is prior to now (if so set now)
-	 */
-	if (oc.tv_sec < 0)
-		oc.tv_sec = oc.tv_nsec = 0;
-
-	if (oc.tv_sec | oc.tv_nsec)
-		set_normalized_timespec(&oc, oc.tv_sec,
-					oc.tv_nsec + clock->res);
-	tstojiffie(&oc, clock->res, exp);
-
-	/*
-	 * Check if the requested time is more than the timer code
-	 * can handle (if so we error out but return the value too).
-	 */
-	if (*exp > ((u64)MAX_JIFFY_OFFSET))
-		/*
-		 * This is a considered response, not exactly in
-		 * line with the standard (in fact it is silent on
-		 * possible overflows). We assume such a large
-		 * value is ALMOST always a programming error and
-		 * try not to compound it by setting a really dumb
-		 * value.
-		 */
-		rtn = -EINVAL;
-	/*
-	 * return the actual jiffies expire time, full 64 bits
-	 */
-	*exp += jiffies_64_f;
-	return rtn;
-}
 
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
@@ -937,68 +691,48 @@ static inline int
 common_timer_set(struct k_itimer *timr, int flags,
 		 struct itimerspec *new_setting, struct itimerspec *old_setting)
 {
-	struct k_clock *clock = &posix_clocks[timr->it_clock];
-	u64 expire_64;
+	struct hrtimer *timer = &timr->it.real.timer;
 
 	if (old_setting)
 		common_timer_get(timr, old_setting);
 
 	/* disable the timer */
-	timr->it.real.incr = 0;
+	timr->it.real.interval.tv64 = 0;
 	/*
 	 * careful here. If smp we could be in the "fire" routine which will
 	 * be spinning as we hold the lock. But this is ONLY an SMP issue.
 	 */
-	if (try_to_del_timer_sync(&timr->it.real.timer) < 0) {
-#ifdef CONFIG_SMP
-		/*
-		 * It can only be active if on an other cpu. Since
-		 * we have cleared the interval stuff above, it should
-		 * clear once we release the spin lock. Of course once
-		 * we do that anything could happen, including the
-		 * complete melt down of the timer. So return with
-		 * a "retry" exit status.
-		 */
+	if (hrtimer_try_to_cancel(timer) < 0)
 		return TIMER_RETRY;
-#endif
-	}
-
-	remove_from_abslist(timr);
 
 	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
 		~REQUEUE_PENDING;
 	timr->it_overrun_last = 0;
-	timr->it_overrun = -1;
-	/*
-	 *switch off the timer when it_value is zero
-	 */
-	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) {
-		timr->it.real.timer.expires = 0;
-		return 0;
-	}
 
-	if (adjust_abs_time(clock,
-			    &new_setting->it_value, flags & TIMER_ABSTIME,
-			    &expire_64, &(timr->it.real.wall_to_prev))) {
-		return -EINVAL;
-	}
-	timr->it.real.timer.expires = (unsigned long)expire_64;
-	tstojiffie(&new_setting->it_interval, clock->res, &expire_64);
-	timr->it.real.incr = (unsigned long)expire_64;
+	/* switch off the timer when it_value is zero */
+	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
+		return 0;
 
-	/*
-	 * We do not even queue SIGEV_NONE timers! But we do put them
-	 * in the abs list so we can do that right.
-	 */
-	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE))
-		add_timer(&timr->it.real.timer);
-
-	if (flags & TIMER_ABSTIME && clock->abs_struct) {
-		spin_lock(&clock->abs_struct->lock);
-		list_add_tail(&(timr->it.real.abs_timer_entry),
-			      &(clock->abs_struct->list));
-		spin_unlock(&clock->abs_struct->lock);
-	}
+	/* Posix madness. Only absolute CLOCK_REALTIME timers
+	 * are affected by clock sets. So we must reinitialize
+	 * the timer.
+	 */
+	if (timr->it_clock == CLOCK_REALTIME && (flags & TIMER_ABSTIME))
+		hrtimer_rebase(timer, CLOCK_REALTIME);
+	else
+		hrtimer_rebase(timer, CLOCK_MONOTONIC);
+
+	timer->expires = timespec_to_ktime(new_setting->it_value);
+
+	/* Convert interval */
+	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
+
+	/* SIGEV_NONE timers are not queued ! See common_timer_get */
+	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+		return 0;
+
+	hrtimer_start(timer, timer->expires, (flags & TIMER_ABSTIME) ?
+		      HRTIMER_ABS : HRTIMER_REL);
 	return 0;
 }
 
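
[Editorial aside: not part of the patch] The "Posix madness" comment above is the user-visible rule: only absolute CLOCK_REALTIME timers track clock sets, which is why common_timer_set() rebases the hrtimer. A hedged userspace fragment (the helper name and the 5-second deadline are invented for illustration):

	#include <time.h>

	/* tid: a timer created on CLOCK_REALTIME with timer_create() */
	static int arm_absolute(timer_t tid)
	{
		struct itimerspec its = { .it_interval = { 0, 0 } };

		if (clock_gettime(CLOCK_REALTIME, &its.it_value))
			return -1;
		its.it_value.tv_sec += 5;	/* fire at wall time now + 5s */

		/*
		 * With TIMER_ABSTIME the timer stays on the realtime base,
		 * so setting the wall clock past the target fires it at
		 * once; without the flag it runs on CLOCK_MONOTONIC and
		 * ignores clock sets.
		 */
		return timer_settime(tid, TIMER_ABSTIME, &its, NULL);
	}
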
@@ -1020,8 +754,8 @@ sys_timer_settime(timer_t timer_id, int flags,
 	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
 		return -EFAULT;
 
-	if ((!good_timespec(&new_spec.it_interval)) ||
-	    (!good_timespec(&new_spec.it_value)))
+	if (!timespec_valid(&new_spec.it_interval) ||
+	    !timespec_valid(&new_spec.it_value))
 		return -EINVAL;
 retry:
 	timr = lock_timer(timer_id, &flag);
@@ -1037,8 +771,8 @@ retry:
 		goto retry;
 	}
 
-	if (old_setting && !error && copy_to_user(old_setting,
-			&old_spec, sizeof (old_spec)))
+	if (old_setting && !error &&
+	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
 		error = -EFAULT;
 
 	return error;
@@ -1046,24 +780,10 @@ retry:
 
 static inline int common_timer_del(struct k_itimer *timer)
 {
-	timer->it.real.incr = 0;
+	timer->it.real.interval.tv64 = 0;
 
-	if (try_to_del_timer_sync(&timer->it.real.timer) < 0) {
-#ifdef CONFIG_SMP
-		/*
-		 * It can only be active if on an other cpu. Since
-		 * we have cleared the interval stuff above, it should
-		 * clear once we release the spin lock. Of course once
-		 * we do that anything could happen, including the
-		 * complete melt down of the timer. So return with
-		 * a "retry" exit status.
-		 */
+	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
 		return TIMER_RETRY;
-#endif
-	}
-
-	remove_from_abslist(timer);
-
 	return 0;
 }
 
@@ -1079,24 +799,16 @@ sys_timer_delete(timer_t timer_id)
 	struct k_itimer *timer;
 	long flags;
 
-#ifdef CONFIG_SMP
-	int error;
 retry_delete:
-#endif
 	timer = lock_timer(timer_id, &flags);
 	if (!timer)
 		return -EINVAL;
 
-#ifdef CONFIG_SMP
-	error = timer_delete_hook(timer);
-
-	if (error == TIMER_RETRY) {
+	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
 		goto retry_delete;
 	}
-#else
-	timer_delete_hook(timer);
-#endif
+
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
 	spin_unlock(&current->sighand->siglock);
@@ -1113,6 +825,7 @@ retry_delete:
 	release_posix_timer(timer, IT_ID_SET);
 	return 0;
 }
+
 /*
  * return timer owned by the process, used by exit_itimers
  */
@@ -1120,22 +833,13 @@ static inline void itimer_delete(struct k_itimer *timer)
 {
 	unsigned long flags;
 
-#ifdef CONFIG_SMP
-	int error;
 retry_delete:
-#endif
 	spin_lock_irqsave(&timer->it_lock, flags);
 
-#ifdef CONFIG_SMP
-	error = timer_delete_hook(timer);
-
-	if (error == TIMER_RETRY) {
+	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
 		goto retry_delete;
 	}
-#else
-	timer_delete_hook(timer);
-#endif
 	list_del(&timer->list);
 	/*
 	 * This keeps any tasks waiting on the spin lock from thinking
@@ -1164,57 +868,7 @@ void exit_itimers(struct signal_struct *sig)
 	}
 }
 
-/*
- * And now for the "clock" calls
- *
- * These functions are called both from timer functions (with the timer
- * spin_lock_irq() held and from clock calls with no locking. They must
- * use the save flags versions of locks.
- */
-
-/*
- * We do ticks here to avoid the irq lock ( they take sooo long).
- * The seqlock is great here. Since we a reader, we don't really care
- * if we are interrupted since we don't take lock that will stall us or
- * any other cpu. Voila, no irq lock is needed.
- *
- */
-
-static u64 do_posix_clock_monotonic_gettime_parts(
-	struct timespec *tp, struct timespec *mo)
-{
-	u64 jiff;
-	unsigned int seq;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		getnstimeofday(tp);
-		*mo = wall_to_monotonic;
-		jiff = jiffies_64;
-
-	} while(read_seqretry(&xtime_lock, seq));
-
-	return jiff;
-}
-
-static int do_posix_clock_monotonic_get(const clockid_t clock,
-					struct timespec *tp)
-{
-	struct timespec wall_to_mono;
-
-	do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);
-
-	set_normalized_timespec(tp, tp->tv_sec + wall_to_mono.tv_sec,
-				tp->tv_nsec + wall_to_mono.tv_nsec);
-
-	return 0;
-}
-
-int do_posix_clock_monotonic_gettime(struct timespec *tp)
-{
-	return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
-}
-
+/* Not available / possible... functions */
 int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
 {
 	return -EINVAL;
@@ -1288,107 +942,6 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
 }
 
 /*
- * The standard says that an absolute nanosleep call MUST wake up at
- * the requested time in spite of clock settings. Here is what we do:
- * For each nanosleep call that needs it (only absolute and not on
- * CLOCK_MONOTONIC* (as it can not be set)) we thread a little structure
- * into the "nanosleep_abs_list". All we need is the task_struct pointer.
- * When ever the clock is set we just wake up all those tasks. The rest
- * is done by the while loop in clock_nanosleep().
- *
- * On locking, clock_was_set() is called from update_wall_clock which
- * holds (or has held for it) a write_lock_irq( xtime_lock) and is
- * called from the timer bh code. Thus we need the irq save locks.
- *
- * Also, on the call from update_wall_clock, that is done as part of a
- * softirq thing. We don't want to delay the system that much (possibly
- * long list of timers to fix), so we defer that work to keventd.
- */
-
-static DECLARE_WAIT_QUEUE_HEAD(nanosleep_abs_wqueue);
-static DECLARE_WORK(clock_was_set_work, (void(*)(void*))clock_was_set, NULL);
-
-static DECLARE_MUTEX(clock_was_set_lock);
-
-void clock_was_set(void)
-{
-	struct k_itimer *timr;
-	struct timespec new_wall_to;
-	LIST_HEAD(cws_list);
-	unsigned long seq;
-
-
-	if (unlikely(in_interrupt())) {
-		schedule_work(&clock_was_set_work);
-		return;
-	}
-	wake_up_all(&nanosleep_abs_wqueue);
-
-	/*
-	 * Check if there exist TIMER_ABSTIME timers to correct.
-	 *
-	 * Notes on locking: This code is run in task context with irq
-	 * on. We CAN be interrupted! All other usage of the abs list
-	 * lock is under the timer lock which holds the irq lock as
-	 * well. We REALLY don't want to scan the whole list with the
-	 * interrupt system off, AND we would like a sequence lock on
-	 * this code as well. Since we assume that the clock will not
-	 * be set often, it seems ok to take and release the irq lock
-	 * for each timer. In fact add_timer will do this, so this is
-	 * not an issue. So we know when we are done, we will move the
-	 * whole list to a new location. Then as we process each entry,
-	 * we will move it to the actual list again. This way, when our
-	 * copy is empty, we are done. We are not all that concerned
-	 * about preemption so we will use a semaphore lock to protect
-	 * aginst reentry. This way we will not stall another
-	 * processor. It is possible that this may delay some timers
-	 * that should have expired, given the new clock, but even this
-	 * will be minimal as we will always update to the current time,
-	 * even if it was set by a task that is waiting for entry to
-	 * this code. Timers that expire too early will be caught by
-	 * the expire code and restarted.
-
-	 * Absolute timers that repeat are left in the abs list while
-	 * waiting for the task to pick up the signal. This means we
-	 * may find timers that are not in the "add_timer" list, but are
-	 * in the abs list. We do the same thing for these, save
-	 * putting them back in the "add_timer" list. (Note, these are
-	 * left in the abs list mainly to indicate that they are
-	 * ABSOLUTE timers, a fact that is used by the re-arm code, and
-	 * for which we have no other flag.)
-
-	 */
-
-	down(&clock_was_set_lock);
-	spin_lock_irq(&abs_list.lock);
-	list_splice_init(&abs_list.list, &cws_list);
-	spin_unlock_irq(&abs_list.lock);
-	do {
-		do {
-			seq = read_seqbegin(&xtime_lock);
-			new_wall_to = wall_to_monotonic;
-		} while (read_seqretry(&xtime_lock, seq));
-
-		spin_lock_irq(&abs_list.lock);
-		if (list_empty(&cws_list)) {
-			spin_unlock_irq(&abs_list.lock);
-			break;
-		}
-		timr = list_entry(cws_list.next, struct k_itimer,
-				  it.real.abs_timer_entry);
-
-		list_del_init(&timr->it.real.abs_timer_entry);
-		if (add_clockset_delta(timr, &new_wall_to) &&
-		    del_timer(&timr->it.real.timer))  /* timer run yet? */
-			add_timer(&timr->it.real.timer);
-		list_add(&timr->it.real.abs_timer_entry, &abs_list.list);
-		spin_unlock_irq(&abs_list.lock);
-	} while (1);
-
-	up(&clock_was_set_lock);
-}
-
-/*
  * nanosleep for monotonic and realtime clocks
  */
 static int common_nsleep(const clockid_t which_clock, int flags,
@@ -1401,7 +954,7 @@ static int common_nsleep(const clockid_t which_clock, int flags,
 	case CLOCK_REALTIME:
 		/* Posix madness. Only absolute timers on clock realtime
 		   are affected by clock set. */
-		if (mode == HRTIMER_ABS)
+		if (mode != HRTIMER_ABS)
 			clockid = CLOCK_MONOTONIC;
 	case CLOCK_MONOTONIC:
 		break;
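
[Editorial aside: not part of the patch] The one-character change in this last hunk (== to !=) gives the intended behaviour: only relative CLOCK_REALTIME sleeps are rerouted to CLOCK_MONOTONIC, while absolute ones keep tracking the wall clock as POSIX requires. A hedged userspace sketch of the case that must stay on the realtime clock (the helper name is invented):

	#include <time.h>

	/* Sleep until an absolute wall-clock second; reacts to clock sets. */
	static int sleep_until_wall(time_t deadline)
	{
		struct timespec ts = { .tv_sec = deadline };

		return clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
				       &ts, NULL);
	}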