Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r--	kernel/posix-timers.c	369
1 files changed, 196 insertions, 173 deletions
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 93bd2eb2bc53..4556182527f3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/idr.h>
+#include <linux/posix-clock.h>
 #include <linux/posix-timers.h>
 #include <linux/syscalls.h>
 #include <linux/wait.h>
@@ -81,6 +82,14 @@ static DEFINE_SPINLOCK(idr_lock);
 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
 #endif
 
+/*
+ * parisc wants ENOTSUP instead of EOPNOTSUPP
+ */
+#ifndef ENOTSUP
+# define ENANOSLEEP_NOTSUP EOPNOTSUPP
+#else
+# define ENANOSLEEP_NOTSUP ENOTSUP
+#endif
 
 /*
  * The timer ID is turned into a timer address by idr_find().
@@ -94,11 +103,7 @@ static DEFINE_SPINLOCK(idr_lock);
 /*
  * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
  * to implement others. This structure defines the various
- * clocks and allows the possibility of adding others. We
- * provide an interface to add clocks to the table and expect
- * the "arch" code to add at least one clock that is high
- * resolution. Here we define the standard CLOCK_REALTIME as a
- * 1/HZ resolution clock.
+ * clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 * times, NOT to report clock times, which are reported with as
@@ -108,20 +113,13 @@ static DEFINE_SPINLOCK(idr_lock);
 * necessary code is written. The standard says we should say
 * something about this issue in the documentation...
 *
- * FUNCTIONS: The CLOCKs structure defines possible functions to handle
- * various clock functions. For clocks that use the standard
- * system timer code these entries should be NULL. This will
- * allow dispatch without the overhead of indirect function
- * calls. CLOCKS that depend on other sources (e.g. WWV or GPS)
- * must supply functions here, even if the function just returns
- * ENOSYS. The standard POSIX timer management code assumes the
- * following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
- * fields are not modified by timer code.
+ * FUNCTIONS: The CLOCKs structure defines possible functions to
+ * handle various clock functions.
 *
- * At this time all functions EXCEPT clock_nanosleep can be
- * redirected by the CLOCKS structure. Clock_nanosleep is in
- * there, but the code ignores it.
+ * The standard POSIX timer management code assumes the
+ * following: 1.) The k_itimer struct (sched.h) is used for
+ * the timer. 2.) The list, it_lock, it_clock, it_id and
+ * it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 * for each clock will take care of permission checks. Some
@@ -138,6 +136,7 @@ static struct k_clock posix_clocks[MAX_CLOCKS];
 */
 static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
+static int common_timer_create(struct k_itimer *new_timer);
 static void common_timer_get(struct k_itimer *, struct itimerspec *);
 static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
@@ -158,76 +157,24 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
 	spin_unlock_irqrestore(&timr->it_lock, flags);
 }
 
-/*
- * Call the k_clock hook function if non-null, or the default function.
- */
-#define CLOCK_DISPATCH(clock, call, arglist) \
-	((clock) < 0 ? posix_cpu_##call arglist : \
-	 (posix_clocks[clock].call != NULL \
-	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))
-
-/*
- * Default clock hook functions when the struct k_clock passed
- * to register_posix_clock leaves a function pointer null.
- *
- * The function common_CALL is the default implementation for
- * the function pointer CALL in struct k_clock.
- */
-
-static inline int common_clock_getres(const clockid_t which_clock,
-				      struct timespec *tp)
-{
-	tp->tv_sec = 0;
-	tp->tv_nsec = posix_clocks[which_clock].res;
-	return 0;
-}
-
-/*
- * Get real time for posix timers
- */
-static int common_clock_get(clockid_t which_clock, struct timespec *tp)
+/* Get clock_realtime */
+static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
 {
 	ktime_get_real_ts(tp);
 	return 0;
 }
 
-static inline int common_clock_set(const clockid_t which_clock,
-				   struct timespec *tp)
+/* Set clock_realtime */
+static int posix_clock_realtime_set(const clockid_t which_clock,
+				    const struct timespec *tp)
 {
 	return do_sys_settimeofday(tp, NULL);
 }
 
-static int common_timer_create(struct k_itimer *new_timer)
-{
-	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
-	return 0;
-}
-
-static int no_timer_create(struct k_itimer *new_timer)
-{
-	return -EOPNOTSUPP;
-}
-
-static int no_nsleep(const clockid_t which_clock, int flags,
-		     struct timespec *tsave, struct timespec __user *rmtp)
-{
-	return -EOPNOTSUPP;
-}
-
-/*
- * Return nonzero if we know a priori this clockid_t value is bogus.
- */
-static inline int invalid_clockid(const clockid_t which_clock)
+static int posix_clock_realtime_adj(const clockid_t which_clock,
+				    struct timex *t)
 {
-	if (which_clock < 0)	/* CPU clock, posix_cpu_* will check it */
-		return 0;
-	if ((unsigned) which_clock >= MAX_CLOCKS)
-		return 1;
-	if (posix_clocks[which_clock].clock_getres != NULL)
-		return 0;
-	if (posix_clocks[which_clock].res != 0)
-		return 0;
-	return 1;
+	return do_adjtimex(t);
 }
 
 /*
@@ -240,7 +187,7 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
- * Get monotonic time for posix timers
+ * Get monotonic-raw time for posix timers
 */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
 {
@@ -267,46 +214,70 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp
 	*tp = ktime_to_timespec(KTIME_LOW_RES);
 	return 0;
 }
+
+static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+{
+	get_monotonic_boottime(tp);
+	return 0;
+}
+
+
 /*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
 static __init int init_posix_timers(void)
 {
	struct k_clock clock_realtime = {
		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_clock_realtime_get,
+		.clock_set = posix_clock_realtime_set,
+		.clock_adj = posix_clock_realtime_adj,
+		.nsleep = common_nsleep,
+		.nsleep_restart = hrtimer_nanosleep_restart,
+		.timer_create = common_timer_create,
+		.timer_set = common_timer_set,
+		.timer_get = common_timer_get,
+		.timer_del = common_timer_del,
	};
	struct k_clock clock_monotonic = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_ktime_get_ts,
-		.clock_set = do_posix_clock_nosettime,
+		.nsleep = common_nsleep,
+		.nsleep_restart = hrtimer_nanosleep_restart,
+		.timer_create = common_timer_create,
+		.timer_set = common_timer_set,
+		.timer_get = common_timer_get,
+		.timer_del = common_timer_del,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_get_monotonic_raw,
-		.clock_set = do_posix_clock_nosettime,
-		.timer_create = no_timer_create,
-		.nsleep = no_nsleep,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_realtime_coarse,
-		.clock_set = do_posix_clock_nosettime,
-		.timer_create = no_timer_create,
-		.nsleep = no_nsleep,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres = posix_get_coarse_res,
		.clock_get = posix_get_monotonic_coarse,
-		.clock_set = do_posix_clock_nosettime,
-		.timer_create = no_timer_create,
-		.nsleep = no_nsleep,
+	};
+	struct k_clock clock_boottime = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_boottime,
+		.nsleep = common_nsleep,
+		.nsleep_restart = hrtimer_nanosleep_restart,
+		.timer_create = common_timer_create,
+		.timer_set = common_timer_set,
+		.timer_get = common_timer_get,
+		.timer_del = common_timer_del,
	};
 
-	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
-	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
-	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
-	register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
-	register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
+	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
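
The hunk above also introduces CLOCK_BOOTTIME, backed by get_monotonic_boottime(): a monotonic clock that keeps advancing across system suspend, registered alongside the existing clocks. As a rough illustration (not part of the patch), a userspace check of the new clock could look like the sketch below; the fallback #define is an assumption for headers that predate CLOCK_BOOTTIME, and older glibc may need -lrt when linking.

/* Hedged userspace sketch: read CLOCK_BOOTTIME and compare with CLOCK_MONOTONIC.
 * Assumes a kernel that provides CLOCK_BOOTTIME, as added by this diff. */
#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* value from linux/time.h; assumption for old headers */
#endif

int main(void)
{
	struct timespec boot, mono;

	if (clock_gettime(CLOCK_BOOTTIME, &boot)) {
		perror("clock_gettime(CLOCK_BOOTTIME)");
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &mono);

	/* BOOTTIME >= MONOTONIC; the difference is roughly time spent suspended. */
	printf("boottime:  %ld.%09ld\n", (long)boot.tv_sec, boot.tv_nsec);
	printf("monotonic: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
	return 0;
}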
@@ -342,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr)
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
- * To protect aginst the timer going away while the interrupt is queued,
+ * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
 void do_schedule_next_timer(struct siginfo *info)
@@ -482,17 +453,29 @@ static struct pid *good_sigevent(sigevent_t * event)
	return task_pid(rtn);
 }
 
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
+void posix_timers_register_clock(const clockid_t clock_id,
+				 struct k_clock *new_clock)
 {
	if ((unsigned) clock_id >= MAX_CLOCKS) {
-		printk("POSIX clock register failed for clock_id %d\n",
+		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+		       clock_id);
+		return;
+	}
+
+	if (!new_clock->clock_get) {
+		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
+		       clock_id);
+		return;
+	}
+	if (!new_clock->clock_getres) {
+		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
		       clock_id);
		return;
	}
 
	posix_clocks[clock_id] = *new_clock;
 }
-EXPORT_SYMBOL_GPL(register_posix_clock);
+EXPORT_SYMBOL_GPL(posix_timers_register_clock);
 
 static struct k_itimer * alloc_posix_timer(void)
 {
@@ -508,6 +491,13 @@ static struct k_itimer * alloc_posix_timer(void)
	return tmr;
 }
 
+static void k_itimer_rcu_free(struct rcu_head *head)
+{
+	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
+
+	kmem_cache_free(posix_timers_cache, tmr);
+}
+
 #define IT_ID_SET 1
 #define IT_ID_NOT_SET 0
 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
@@ -520,7 +510,24 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
-	kmem_cache_free(posix_timers_cache, tmr);
+	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
+}
+
+static struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+	if (id < 0)
+		return (id & CLOCKFD_MASK) == CLOCKFD ?
+			&clock_posix_dynamic : &clock_posix_cpu;
+
+	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+		return NULL;
+	return &posix_clocks[id];
+}
+
+static int common_timer_create(struct k_itimer *new_timer)
+{
+	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
+	return 0;
 }
 
 /* Create a POSIX.1b interval timer. */
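
The clockid_to_kclock() helper added above is the replacement for the CLOCK_DISPATCH() macro removed in an earlier hunk: each syscall now resolves the clock id to a struct k_clock once, fails with -EINVAL for ids that were never registered, and with -EOPNOTSUPP where a clock leaves an optional operation NULL. The following stand-alone sketch models that dispatch pattern in plain userspace C; the structure layout, the MAX_CLOCKS value and the helper names are simplified stand-ins for illustration, not the kernel's definitions.

/* Hedged, simplified model of the k_clock dispatch introduced by this diff.
 * Types and names are illustrative only; they do not match kernel internals. */
#include <errno.h>
#include <stdio.h>

#define MAX_CLOCKS 16

struct k_clock {
	int (*clock_get)(int id);	/* mandatory, like clock_get/clock_getres */
	int (*timer_create)(int id);	/* optional; NULL means "no timers on this clock" */
};

static struct k_clock posix_clocks[MAX_CLOCKS];

static struct k_clock *clockid_to_kclock(int id)
{
	/* Negative ids (CPU and dynamic clocks) are handled elsewhere in the kernel. */
	if (id < 0 || id >= MAX_CLOCKS || !posix_clocks[id].clock_get)
		return NULL;
	return &posix_clocks[id];
}

static int get_realtime(int id) { printf("clock %d: get\n", id); return 0; }
static int create_timer(int id) { printf("clock %d: timer created\n", id); return 0; }

static int sys_timer_create(int id)
{
	struct k_clock *kc = clockid_to_kclock(id);

	if (!kc)
		return -EINVAL;		/* unknown clock id */
	if (!kc->timer_create)
		return -EOPNOTSUPP;	/* clock exists but does not support timers */
	return kc->timer_create(id);
}

int main(void)
{
	posix_clocks[0] = (struct k_clock){ .clock_get = get_realtime,
					    .timer_create = create_timer };
	posix_clocks[4] = (struct k_clock){ .clock_get = get_realtime };

	printf("clock 0: %d\n", sys_timer_create(0));	/* 0 */
	printf("clock 4: %d\n", sys_timer_create(4));	/* -EOPNOTSUPP */
	printf("clock 9: %d\n", sys_timer_create(9));	/* -EINVAL */
	return 0;
}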
@@ -529,13 +536,16 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
 {
+	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;
 
-	if (invalid_clockid(which_clock))
+	if (!kc)
		return -EINVAL;
+	if (!kc->timer_create)
+		return -EOPNOTSUPP;
 
	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
@@ -597,7 +607,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		goto out;
	}
 
-	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+	error = kc->timer_create(new_timer);
	if (error)
		goto out;
 
@@ -607,7 +617,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
	spin_unlock_irq(&current->sighand->siglock);
 
	return 0;
- 	/*
+	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time. Don't use or modify
@@ -628,22 +638,18 @@ out:
 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
	struct k_itimer *timr;
-	/*
-	 * Watch out here. We do a irqsave on the idr_lock and pass the
-	 * flags part over to the timer lock. Must not let interrupts in
-	 * while we are moving the lock.
-	 */
-	spin_lock_irqsave(&idr_lock, *flags);
+
+	rcu_read_lock();
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
-		spin_lock(&timr->it_lock);
+		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
-			spin_unlock(&idr_lock);
+			rcu_read_unlock();
			return timr;
		}
-		spin_unlock(&timr->it_lock);
+		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
-	spin_unlock_irqrestore(&idr_lock, *flags);
+	rcu_read_unlock();
 
	return NULL;
 }
@@ -709,22 +715,28 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
 {
-	struct k_itimer *timr;
	struct itimerspec cur_setting;
+	struct k_itimer *timr;
+	struct k_clock *kc;
	unsigned long flags;
+	int ret = 0;
 
	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;
 
-	CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
+	kc = clockid_to_kclock(timr->it_clock);
+	if (WARN_ON_ONCE(!kc || !kc->timer_get))
+		ret = -EINVAL;
+	else
+		kc->timer_get(timr, &cur_setting);
 
	unlock_timer(timr, flags);
 
-	if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -813,6 +825,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+	struct k_clock *kc;
 
	if (!new_setting)
		return -EINVAL;
@@ -828,8 +841,11 @@ retry:
	if (!timr)
		return -EINVAL;
 
-	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
-			       (timr, flags, &new_spec, rtn));
+	kc = clockid_to_kclock(timr->it_clock);
+	if (WARN_ON_ONCE(!kc || !kc->timer_set))
+		error = -EINVAL;
+	else
+		error = kc->timer_set(timr, flags, &new_spec, rtn);
 
	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
@@ -844,7 +860,7 @@ retry:
	return error;
 }
 
-static inline int common_timer_del(struct k_itimer *timer)
+static int common_timer_del(struct k_itimer *timer)
 {
	timer->it.real.interval.tv64 = 0;
 
@@ -855,7 +871,11 @@ static inline int common_timer_del(struct k_itimer *timer)
 
 static inline int timer_delete_hook(struct k_itimer *timer)
 {
-	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
+	struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+	if (WARN_ON_ONCE(!kc || !kc->timer_del))
+		return -EINVAL;
+	return kc->timer_del(timer);
 }
 
 /* Delete a POSIX.1b interval timer. */
@@ -927,69 +947,76 @@ void exit_itimers(struct signal_struct *sig)
	}
 }
 
-/* Not available / possible... functions */
-int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
-
-int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
-			       struct timespec *t, struct timespec __user *r)
-{
-#ifndef ENOTSUP
-	return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */
-#else /* parisc does define it separately. */
-	return -ENOTSUP;
-#endif
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-
 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
 {
+	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec new_tp;
 
-	if (invalid_clockid(which_clock))
+	if (!kc || !kc->clock_set)
		return -EINVAL;
+
	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;
 
-	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
+	return kc->clock_set(which_clock, &new_tp);
 }
 
 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
 {
+	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;
 
-	if (invalid_clockid(which_clock))
+	if (!kc)
		return -EINVAL;
-	error = CLOCK_DISPATCH(which_clock, clock_get,
-			       (which_clock, &kernel_tp));
+
+	error = kc->clock_get(which_clock, &kernel_tp);
+
	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;
 
	return error;
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+		struct timex __user *, utx)
+{
+	struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timex ktx;
+	int err;
+
+	if (!kc)
+		return -EINVAL;
+	if (!kc->clock_adj)
+		return -EOPNOTSUPP;
 
+	if (copy_from_user(&ktx, utx, sizeof(ktx)))
+		return -EFAULT;
+
+	err = kc->clock_adj(which_clock, &ktx);
+
+	if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
+		return -EFAULT;
+
+	return err;
 }
 
 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
 {
+	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec rtn_tp;
	int error;
 
-	if (invalid_clockid(which_clock))
+	if (!kc)
		return -EINVAL;
 
-	error = CLOCK_DISPATCH(which_clock, clock_getres,
-			       (which_clock, &rtn_tp));
+	error = kc->clock_getres(which_clock, &rtn_tp);
 
-	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
+	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;
-	}
 
	return error;
 }
@@ -1009,10 +1036,13 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
 {
+	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;
 
-	if (invalid_clockid(which_clock))
+	if (!kc)
		return -EINVAL;
+	if (!kc->nsleep)
+		return -ENANOSLEEP_NOTSUP;
 
	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;
@@ -1020,27 +1050,20 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
	if (!timespec_valid(&t))
		return -EINVAL;
 
-	return CLOCK_DISPATCH(which_clock, nsleep,
-			      (which_clock, flags, &t, rmtp));
-}
-
-/*
- * nanosleep_restart for monotonic and realtime clocks
- */
-static int common_nsleep_restart(struct restart_block *restart_block)
-{
-	return hrtimer_nanosleep_restart(restart_block);
+	return kc->nsleep(which_clock, flags, &t, rmtp);
 }
 
 /*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
-long
-clock_nanosleep_restart(struct restart_block *restart_block)
+long clock_nanosleep_restart(struct restart_block *restart_block)
 {
-	clockid_t which_clock = restart_block->arg0;
+	clockid_t which_clock = restart_block->nanosleep.clockid;
+	struct k_clock *kc = clockid_to_kclock(which_clock);
+
+	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+		return -EINVAL;
 
-	return CLOCK_DISPATCH(which_clock, nsleep_restart,
-			      (restart_block));
+	return kc->nsleep_restart(restart_block);
 }
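
Beyond the internal rework, the diff adds one user-visible syscall: clock_adjtime(), which for CLOCK_REALTIME is routed through posix_clock_realtime_adj() into do_adjtimex(). A minimal read-only query from userspace might look like the sketch below; it assumes a libc recent enough to expose clock_adjtime() via <sys/timex.h> (otherwise a raw syscall(__NR_clock_adjtime, ...) wrapper would be needed).

/* Hedged example: query (not modify) the CLOCK_REALTIME adjustment state
 * through the clock_adjtime() syscall added by this diff. */
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>
#include <time.h>

int main(void)
{
	struct timex tx;
	int state;

	memset(&tx, 0, sizeof(tx));	/* tx.modes = 0: read-only query */

	state = clock_adjtime(CLOCK_REALTIME, &tx);
	if (state < 0) {
		perror("clock_adjtime");
		return 1;
	}

	/* state is the clock state (TIME_OK, TIME_INS, ...), as with adjtimex(). */
	printf("state=%d freq=%ld offset=%ld\n", state, tx.freq, tx.offset);
	return 0;
}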