Diffstat (limited to 'kernel/posix-timers.c')

 -rw-r--r--  kernel/posix-timers.c | 379
 1 file changed, 204 insertions, 175 deletions
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9ca4973f736d..4556182527f3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/idr.h>
+#include <linux/posix-clock.h>
 #include <linux/posix-timers.h>
 #include <linux/syscalls.h>
 #include <linux/wait.h>
@@ -81,6 +82,14 @@ static DEFINE_SPINLOCK(idr_lock);
 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
 #endif
 
+/*
+ * parisc wants ENOTSUP instead of EOPNOTSUPP
+ */
+#ifndef ENOTSUP
+# define ENANOSLEEP_NOTSUP EOPNOTSUPP
+#else
+# define ENANOSLEEP_NOTSUP ENOTSUP
+#endif
 
 /*
  * The timer ID is turned into a timer address by idr_find().
@@ -94,11 +103,7 @@ static DEFINE_SPINLOCK(idr_lock);
 /*
  * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
  * to implement others. This structure defines the various
- * clocks and allows the possibility of adding others. We
- * provide an interface to add clocks to the table and expect
- * the "arch" code to add at least one clock that is high
- * resolution. Here we define the standard CLOCK_REALTIME as a
- * 1/HZ resolution clock.
+ * clocks.
  *
  * RESOLUTION: Clock resolution is used to round up timer and interval
  * times, NOT to report clock times, which are reported with as
@@ -108,20 +113,13 @@ static DEFINE_SPINLOCK(idr_lock);
  * necessary code is written. The standard says we should say
  * something about this issue in the documentation...
  *
- * FUNCTIONS: The CLOCKs structure defines possible functions to handle
- * various clock functions. For clocks that use the standard
- * system timer code these entries should be NULL. This will
- * allow dispatch without the overhead of indirect function
- * calls. CLOCKS that depend on other sources (e.g. WWV or GPS)
- * must supply functions here, even if the function just returns
- * ENOSYS. The standard POSIX timer management code assumes the
- * following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
- * fields are not modified by timer code.
+ * FUNCTIONS: The CLOCKs structure defines possible functions to
+ * handle various clock functions.
  *
- * At this time all functions EXCEPT clock_nanosleep can be
- * redirected by the CLOCKS structure. Clock_nanosleep is in
- * there, but the code ignores it.
+ * The standard POSIX timer management code assumes the
+ * following: 1.) The k_itimer struct (sched.h) is used for
+ * the timer. 2.) The list, it_lock, it_clock, it_id and
+ * it_pid fields are not modified by timer code.
  *
  * Permissions: It is assumed that the clock_settime() function defined
  * for each clock will take care of permission checks. Some
@@ -138,6 +136,7 @@ static struct k_clock posix_clocks[MAX_CLOCKS];
  */
 static int common_nsleep(const clockid_t, int flags, struct timespec *t,
                          struct timespec __user *rmtp);
+static int common_timer_create(struct k_itimer *new_timer);
 static void common_timer_get(struct k_itimer *, struct itimerspec *);
 static int common_timer_set(struct k_itimer *, int,
                             struct itimerspec *, struct itimerspec *);
@@ -145,83 +144,37 @@ static int common_timer_del(struct k_itimer *timer);
 
 static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
 
-static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
+static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
+
+#define lock_timer(tid, flags)                                             \
+({      struct k_itimer *__timr;                                           \
+        __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
+        __timr;                                                            \
+})
 
 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
 {
         spin_unlock_irqrestore(&timr->it_lock, flags);
 }
 
-/*
- * Call the k_clock hook function if non-null, or the default function.
- */
-#define CLOCK_DISPATCH(clock, call, arglist) \
-        ((clock) < 0 ? posix_cpu_##call arglist : \
-         (posix_clocks[clock].call != NULL \
-          ? (*posix_clocks[clock].call) arglist : common_##call arglist))
-
-/*
- * Default clock hook functions when the struct k_clock passed
- * to register_posix_clock leaves a function pointer null.
- *
- * The function common_CALL is the default implementation for
- * the function pointer CALL in struct k_clock.
- */
-
-static inline int common_clock_getres(const clockid_t which_clock,
-                                      struct timespec *tp)
-{
-        tp->tv_sec = 0;
-        tp->tv_nsec = posix_clocks[which_clock].res;
-        return 0;
-}
-
-/*
- * Get real time for posix timers
- */
-static int common_clock_get(clockid_t which_clock, struct timespec *tp)
+/* Get clock_realtime */
+static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
 {
         ktime_get_real_ts(tp);
         return 0;
 }
 
-static inline int common_clock_set(const clockid_t which_clock,
-                                   struct timespec *tp)
+/* Set clock_realtime */
+static int posix_clock_realtime_set(const clockid_t which_clock,
+                                    const struct timespec *tp)
 {
         return do_sys_settimeofday(tp, NULL);
 }
 
-static int common_timer_create(struct k_itimer *new_timer)
-{
-        hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
-        return 0;
-}
-
-static int no_timer_create(struct k_itimer *new_timer)
-{
-        return -EOPNOTSUPP;
-}
-
-static int no_nsleep(const clockid_t which_clock, int flags,
-                     struct timespec *tsave, struct timespec __user *rmtp)
+static int posix_clock_realtime_adj(const clockid_t which_clock,
+                                    struct timex *t)
 {
-        return -EOPNOTSUPP;
-}
-
-/*
- * Return nonzero if we know a priori this clockid_t value is bogus.
- */
-static inline int invalid_clockid(const clockid_t which_clock)
-{
-        if (which_clock < 0)    /* CPU clock, posix_cpu_* will check it */
-                return 0;
-        if ((unsigned) which_clock >= MAX_CLOCKS)
-                return 1;
-        if (posix_clocks[which_clock].clock_getres != NULL)
-                return 0;
-        if (posix_clocks[which_clock].res != 0)
-                return 0;
-        return 1;
+        return do_adjtimex(t);
 }
 
 /*
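The hunk above is the heart of the conversion: CLOCK_DISPATCH() silently fell back to a common_*() default whenever a k_clock hook was NULL, whereas the new per-clock function tables treat a missing hook as an error (EINVAL or EOPNOTSUPP, depending on the call). A minimal userspace analogy of that dispatch style, purely illustrative and not kernel code:

#include <errno.h>
#include <stdio.h>

struct clock_ops {
        int (*get)(int id, long *val);  /* NULL means "not supported" */
};

static int realtime_get(int id, long *val)
{
        (void)id;
        *val = 42;              /* stand-in for reading a real clock */
        return 0;
}

static const struct clock_ops clock_table[] = {
        { .get = realtime_get },        /* a clock that implements the hook */
        { .get = NULL },                /* a clock that does not */
};

static int do_clock_get(int id, long *val)
{
        if (id < 0 || id >= (int)(sizeof(clock_table) / sizeof(clock_table[0])))
                return -EINVAL;
        if (!clock_table[id].get)
                return -EINVAL; /* no silent fallback to a default handler */
        return clock_table[id].get(id, val);
}

int main(void)
{
        long val;

        printf("clock 0 -> %d\n", do_clock_get(0, &val));  /* 0 */
        printf("clock 1 -> %d\n", do_clock_get(1, &val));  /* -EINVAL */
        return 0;
}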
@@ -234,7 +187,7 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
- * Get monotonic time for posix timers
+ * Get monotonic-raw time for posix timers
  */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
 {
@@ -261,46 +214,70 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp
         *tp = ktime_to_timespec(KTIME_LOW_RES);
         return 0;
 }
+
+static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+{
+        get_monotonic_boottime(tp);
+        return 0;
+}
+
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
         struct k_clock clock_realtime = {
                 .clock_getres = hrtimer_get_res,
+                .clock_get = posix_clock_realtime_get,
+                .clock_set = posix_clock_realtime_set,
+                .clock_adj = posix_clock_realtime_adj,
+                .nsleep = common_nsleep,
+                .nsleep_restart = hrtimer_nanosleep_restart,
+                .timer_create = common_timer_create,
+                .timer_set = common_timer_set,
+                .timer_get = common_timer_get,
+                .timer_del = common_timer_del,
         };
         struct k_clock clock_monotonic = {
                 .clock_getres = hrtimer_get_res,
                 .clock_get = posix_ktime_get_ts,
-                .clock_set = do_posix_clock_nosettime,
+                .nsleep = common_nsleep,
+                .nsleep_restart = hrtimer_nanosleep_restart,
+                .timer_create = common_timer_create,
+                .timer_set = common_timer_set,
+                .timer_get = common_timer_get,
+                .timer_del = common_timer_del,
         };
         struct k_clock clock_monotonic_raw = {
                 .clock_getres = hrtimer_get_res,
                 .clock_get = posix_get_monotonic_raw,
-                .clock_set = do_posix_clock_nosettime,
-                .timer_create = no_timer_create,
-                .nsleep = no_nsleep,
         };
         struct k_clock clock_realtime_coarse = {
                 .clock_getres = posix_get_coarse_res,
                 .clock_get = posix_get_realtime_coarse,
-                .clock_set = do_posix_clock_nosettime,
-                .timer_create = no_timer_create,
-                .nsleep = no_nsleep,
         };
         struct k_clock clock_monotonic_coarse = {
                 .clock_getres = posix_get_coarse_res,
                 .clock_get = posix_get_monotonic_coarse,
-                .clock_set = do_posix_clock_nosettime,
-                .timer_create = no_timer_create,
-                .nsleep = no_nsleep,
+        };
+        struct k_clock clock_boottime = {
+                .clock_getres = hrtimer_get_res,
+                .clock_get = posix_get_boottime,
+                .nsleep = common_nsleep,
+                .nsleep_restart = hrtimer_nanosleep_restart,
+                .timer_create = common_timer_create,
+                .timer_set = common_timer_set,
+                .timer_get = common_timer_get,
+                .timer_del = common_timer_del,
         };
 
-        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
-        register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
-        register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
-        register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
-        register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+        posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
+        posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
+        posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
+        posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
+        posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
+        posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
 
         posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                         sizeof (struct k_itimer), 0, SLAB_PANIC,
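With clock_boottime registered, CLOCK_BOOTTIME is readable through the ordinary clock_gettime() path and is expected to run ahead of CLOCK_MONOTONIC by roughly the time spent in suspend. A hedged userspace check (assumes a kernel carrying this patch; the fallback define covers libcs that do not expose CLOCK_BOOTTIME yet, and older glibc needs -lrt):

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7        /* value used by kernel headers of this era */
#endif

int main(void)
{
        struct timespec mono, boot;

        if (clock_gettime(CLOCK_MONOTONIC, &mono) ||
            clock_gettime(CLOCK_BOOTTIME, &boot)) {
                perror("clock_gettime");
                return 1;
        }
        printf("CLOCK_MONOTONIC: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
        printf("CLOCK_BOOTTIME:  %ld.%09ld\n", (long)boot.tv_sec, boot.tv_nsec);
        return 0;
}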
@@ -336,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr)
  * restarted (i.e. we have flagged this in the sys_private entry of the
  * info block).
  *
- * To protect aginst the timer going away while the interrupt is queued,
+ * To protect against the timer going away while the interrupt is queued,
  * we require that the it_requeue_pending flag be set.
  */
 void do_schedule_next_timer(struct siginfo *info)
@@ -476,17 +453,29 @@ static struct pid *good_sigevent(sigevent_t * event)
         return task_pid(rtn);
 }
 
-void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
+void posix_timers_register_clock(const clockid_t clock_id,
+                                 struct k_clock *new_clock)
 {
         if ((unsigned) clock_id >= MAX_CLOCKS) {
-                printk("POSIX clock register failed for clock_id %d\n",
+                printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
+                       clock_id);
+                return;
+        }
+
+        if (!new_clock->clock_get) {
+                printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
+                       clock_id);
+                return;
+        }
+        if (!new_clock->clock_getres) {
+                printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
                        clock_id);
                 return;
         }
 
         posix_clocks[clock_id] = *new_clock;
 }
-EXPORT_SYMBOL_GPL(register_posix_clock);
+EXPORT_SYMBOL_GPL(posix_timers_register_clock);
 
 static struct k_itimer * alloc_posix_timer(void)
 {
@@ -502,6 +491,13 @@ static struct k_itimer * alloc_posix_timer(void)
         return tmr;
 }
 
+static void k_itimer_rcu_free(struct rcu_head *head)
+{
+        struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
+
+        kmem_cache_free(posix_timers_cache, tmr);
+}
+
 #define IT_ID_SET 1
 #define IT_ID_NOT_SET 0
 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
@@ -514,7 +510,24 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
         }
         put_pid(tmr->it_pid);
         sigqueue_free(tmr->sigq);
-        kmem_cache_free(posix_timers_cache, tmr);
+        call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
+}
+
+static struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+        if (id < 0)
+                return (id & CLOCKFD_MASK) == CLOCKFD ?
+                        &clock_posix_dynamic : &clock_posix_cpu;
+
+        if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+                return NULL;
+        return &posix_clocks[id];
+}
+
+static int common_timer_create(struct k_itimer *new_timer)
+{
+        hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
+        return 0;
 }
 
 /* Create a POSIX.1b interval timer. */
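clockid_to_kclock() sends negative clock ids to the CPU-clock or dynamic posix-clock code and positive ids through posix_clocks[], using a non-NULL clock_getres as the marker that a slot is populated. From userspace, the negative ids are the ones handed out by clock_getcpuclockid(); a hedged example using only the standard POSIX API (link with -lrt on older glibc):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        clockid_t cpu_clock;
        struct timespec ts;
        int err;

        /* Per-process CPU clocks are encoded as (typically negative) ids;
         * clock_getcpuclockid() returns an error number, not -1/errno. */
        err = clock_getcpuclockid(getpid(), &cpu_clock);
        if (err) {
                fprintf(stderr, "clock_getcpuclockid: %s\n", strerror(err));
                return 1;
        }
        if (clock_gettime(cpu_clock, &ts) != 0) {
                perror("clock_gettime");
                return 1;
        }
        printf("clockid %d: %ld.%09ld s of CPU time\n",
               (int)cpu_clock, (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}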
@@ -523,13 +536,16 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                 struct sigevent __user *, timer_event_spec,
                 timer_t __user *, created_timer_id)
 {
+        struct k_clock *kc = clockid_to_kclock(which_clock);
         struct k_itimer *new_timer;
         int error, new_timer_id;
         sigevent_t event;
         int it_id_set = IT_ID_NOT_SET;
 
-        if (invalid_clockid(which_clock))
+        if (!kc)
                 return -EINVAL;
+        if (!kc->timer_create)
+                return -EOPNOTSUPP;
 
         new_timer = alloc_posix_timer();
         if (unlikely(!new_timer))
@@ -591,7 +607,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                 goto out;
         }
 
-        error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+        error = kc->timer_create(new_timer);
         if (error)
                 goto out;
 
@@ -601,7 +617,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
         spin_unlock_irq(&current->sighand->siglock);
 
         return 0;
-        /*
+        /*
          * In the case of the timer belonging to another task, after
          * the task is unlocked, the timer is owned by the other task
          * and may cease to exist at any time. Don't use or modify
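timer_create() now refuses clocks whose k_clock lacks a timer_create hook (after this series that includes CLOCK_MONOTONIC_RAW and the coarse clocks, which report EOPNOTSUPP). A hedged sketch of the normal userspace sequence against CLOCK_MONOTONIC, standard POSIX timer API only (link with -lrt on older glibc):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_tick(int sig)
{
        (void)sig;
        ticks++;
}

int main(void)
{
        struct sigaction sa = { .sa_handler = on_tick };
        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL };
        struct itimerspec its = {
                .it_value    = { .tv_sec = 1 },
                .it_interval = { .tv_sec = 1 },
        };
        timer_t tid;

        sev.sigev_signo = SIGRTMIN;     /* not a compile-time constant in glibc */
        sigaction(SIGRTMIN, &sa, NULL);

        if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == -1) {
                perror("timer_create");
                return 1;
        }
        timer_settime(tid, 0, &its, NULL);

        while (ticks < 3)
                pause();        /* the periodic timer keeps delivering signals */

        timer_delete(tid);
        printf("got %d expirations\n", (int)ticks);
        return 0;
}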
@@ -619,25 +635,21 @@ out:
  * the find to the timer lock. To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
         struct k_itimer *timr;
-        /*
-         * Watch out here. We do a irqsave on the idr_lock and pass the
-         * flags part over to the timer lock. Must not let interrupts in
-         * while we are moving the lock.
-         */
-        spin_lock_irqsave(&idr_lock, *flags);
+
+        rcu_read_lock();
         timr = idr_find(&posix_timers_id, (int)timer_id);
         if (timr) {
-                spin_lock(&timr->it_lock);
+                spin_lock_irqsave(&timr->it_lock, *flags);
                 if (timr->it_signal == current->signal) {
-                        spin_unlock(&idr_lock);
+                        rcu_read_unlock();
                         return timr;
                 }
-                spin_unlock(&timr->it_lock);
+                spin_unlock_irqrestore(&timr->it_lock, *flags);
         }
-        spin_unlock_irqrestore(&idr_lock, *flags);
+        rcu_read_unlock();
 
         return NULL;
 }
@@ -703,22 +715,28 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                 struct itimerspec __user *, setting)
 {
-        struct k_itimer *timr;
         struct itimerspec cur_setting;
+        struct k_itimer *timr;
+        struct k_clock *kc;
         unsigned long flags;
+        int ret = 0;
 
         timr = lock_timer(timer_id, &flags);
         if (!timr)
                 return -EINVAL;
 
-        CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));
+        kc = clockid_to_kclock(timr->it_clock);
+        if (WARN_ON_ONCE(!kc || !kc->timer_get))
+                ret = -EINVAL;
+        else
+                kc->timer_get(timr, &cur_setting);
 
         unlock_timer(timr, flags);
 
-        if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+        if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                 return -EFAULT;
 
-        return 0;
+        return ret;
 }
 
 /*
@@ -807,6 +825,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
         int error = 0;
         unsigned long flag;
         struct itimerspec *rtn = old_setting ? &old_spec : NULL;
+        struct k_clock *kc;
 
         if (!new_setting)
                 return -EINVAL;
@@ -822,8 +841,11 @@ retry:
         if (!timr)
                 return -EINVAL;
 
-        error = CLOCK_DISPATCH(timr->it_clock, timer_set,
-                               (timr, flags, &new_spec, rtn));
+        kc = clockid_to_kclock(timr->it_clock);
+        if (WARN_ON_ONCE(!kc || !kc->timer_set))
+                error = -EINVAL;
+        else
+                error = kc->timer_set(timr, flags, &new_spec, rtn);
 
         unlock_timer(timr, flag);
         if (error == TIMER_RETRY) {
@@ -838,7 +860,7 @@ retry:
         return error;
 }
 
-static inline int common_timer_del(struct k_itimer *timer)
+static int common_timer_del(struct k_itimer *timer)
 {
         timer->it.real.interval.tv64 = 0;
 
@@ -849,7 +871,11 @@ static inline int common_timer_del(struct k_itimer *timer)
 
 static inline int timer_delete_hook(struct k_itimer *timer)
 {
-        return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
+        struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+
+        if (WARN_ON_ONCE(!kc || !kc->timer_del))
+                return -EINVAL;
+        return kc->timer_del(timer);
 }
 
 /* Delete a POSIX.1b interval timer. */
@@ -921,69 +947,76 @@ void exit_itimers(struct signal_struct *sig)
         }
 }
 
-/* Not available / possible... functions */
-int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
-{
-        return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
-
-int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
-                               struct timespec *t, struct timespec __user *r)
-{
-#ifndef ENOTSUP
-        return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */
-#else /* parisc does define it separately. */
-        return -ENOTSUP;
-#endif
-}
-EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-
 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
                 const struct timespec __user *, tp)
 {
+        struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec new_tp;
 
-        if (invalid_clockid(which_clock))
+        if (!kc || !kc->clock_set)
                 return -EINVAL;
+
         if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                 return -EFAULT;
 
-        return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
+        return kc->clock_set(which_clock, &new_tp);
 }
 
 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
                 struct timespec __user *,tp)
 {
+        struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec kernel_tp;
         int error;
 
-        if (invalid_clockid(which_clock))
+        if (!kc)
                 return -EINVAL;
-        error = CLOCK_DISPATCH(which_clock, clock_get,
-                               (which_clock, &kernel_tp));
+
+        error = kc->clock_get(which_clock, &kernel_tp);
+
         if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                 error = -EFAULT;
 
         return error;
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+                struct timex __user *, utx)
+{
+        struct k_clock *kc = clockid_to_kclock(which_clock);
+        struct timex ktx;
+        int err;
+
+        if (!kc)
+                return -EINVAL;
+        if (!kc->clock_adj)
+                return -EOPNOTSUPP;
 
+        if (copy_from_user(&ktx, utx, sizeof(ktx)))
+                return -EFAULT;
+
+        err = kc->clock_adj(which_clock, &ktx);
+
+        if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
+                return -EFAULT;
+
+        return err;
 }
 
 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
                 struct timespec __user *, tp)
 {
+        struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec rtn_tp;
         int error;
 
-        if (invalid_clockid(which_clock))
+        if (!kc)
                 return -EINVAL;
 
-        error = CLOCK_DISPATCH(which_clock, clock_getres,
-                               (which_clock, &rtn_tp));
+        error = kc->clock_getres(which_clock, &rtn_tp);
 
-        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
+        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
                 error = -EFAULT;
-        }
 
         return error;
 }
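clock_adjtime() is the new entry point here: a clock either implements clock_adj (CLOCK_REALTIME maps it to do_adjtimex()) or the syscall returns EOPNOTSUPP. With modes == 0 the call only queries the current adjustment state and returns the clock state (TIME_OK and friends). A hedged userspace example, assuming a libc that wraps the syscall (glibc 2.14 or later); otherwise it would have to go through syscall(2):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>
#include <time.h>

int main(void)
{
        struct timex tx;
        int state;

        memset(&tx, 0, sizeof(tx));     /* tx.modes == 0: query, don't modify */
        state = clock_adjtime(CLOCK_REALTIME, &tx);
        if (state == -1) {
                perror("clock_adjtime");
                return 1;
        }
        printf("clock state %d, freq offset %ld (scaled ppm), offset %ld\n",
               state, tx.freq, tx.offset);
        return 0;
}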
@@ -1003,10 +1036,13 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                 const struct timespec __user *, rqtp,
                 struct timespec __user *, rmtp)
 {
+        struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec t;
 
-        if (invalid_clockid(which_clock))
+        if (!kc)
                 return -EINVAL;
+        if (!kc->nsleep)
+                return -ENANOSLEEP_NOTSUP;
 
         if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                 return -EFAULT;
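ENANOSLEEP_NOTSUP is what userspace sees when it asks to sleep on a clock whose k_clock has no nsleep hook, and clock_nanosleep() reports errors by return value rather than through errno. A hedged probe, with a fallback define in case the libc headers predate CLOCK_MONOTONIC_RAW:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>
#include <time.h>

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4   /* kernel value, if the libc lacks it */
#endif

int main(void)
{
        struct timespec req = { .tv_sec = 0, .tv_nsec = 1000000 };
        int err;

        /* CLOCK_MONOTONIC_RAW has no nsleep hook, so this is expected
         * to fail with ENOTSUP/EOPNOTSUPP on a kernel with this patch. */
        err = clock_nanosleep(CLOCK_MONOTONIC_RAW, 0, &req, NULL);
        if (err)
                printf("CLOCK_MONOTONIC_RAW: %s (%d)\n", strerror(err), err);

        /* CLOCK_MONOTONIC routes to common_nsleep() and should just sleep. */
        err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
        printf("CLOCK_MONOTONIC: %d\n", err);
        return 0;
}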
@@ -1014,27 +1050,20 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
         if (!timespec_valid(&t))
                 return -EINVAL;
 
-        return CLOCK_DISPATCH(which_clock, nsleep,
-                              (which_clock, flags, &t, rmtp));
-}
-
-/*
- * nanosleep_restart for monotonic and realtime clocks
- */
-static int common_nsleep_restart(struct restart_block *restart_block)
-{
-        return hrtimer_nanosleep_restart(restart_block);
+        return kc->nsleep(which_clock, flags, &t, rmtp);
 }
 
 /*
  * This will restart clock_nanosleep. This is required only by
  * compat_clock_nanosleep_restart for now.
  */
-long
-clock_nanosleep_restart(struct restart_block *restart_block)
+long clock_nanosleep_restart(struct restart_block *restart_block)
 {
-        clockid_t which_clock = restart_block->arg0;
+        clockid_t which_clock = restart_block->nanosleep.clockid;
+        struct k_clock *kc = clockid_to_kclock(which_clock);
+
+        if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+                return -EINVAL;
 
-        return CLOCK_DISPATCH(which_clock, nsleep_restart,
-                              (restart_block));
+        return kc->nsleep_restart(restart_block);
 }
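For the hrtimer-backed clocks, kc->nsleep is common_nsleep(), which ends up in hrtimer_nanosleep(); the absolute form below is the usual way to get drift-free wakeups, since an interrupted sleep can simply be reissued with the same deadline. A hedged example using the standard POSIX API:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2;   /* wake up two seconds from now */

        /* TIMER_ABSTIME: sleep until an absolute point on CLOCK_MONOTONIC.
         * On EINTR the same deadline is simply passed in again. */
        while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL))
                ;
        puts("deadline reached");
        return 0;
}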