author     Thomas Gleixner <tglx@linutronix.de>    2014-06-22 06:06:40 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2014-06-23 05:22:35 -0400
commit     5cee964597260237dd2cabb3ec22bba0da24b25d (patch)
tree       f548efb4181a4cffb026adf43178e65330533e87 /kernel/posix-timers.c
parent     58394271c610e9c65dd0165a1c1f6dec75dc5f3e (diff)
time/timers: Move all time(r) related files into kernel/time
Except for Kconfig.HZ. That needs a separate treatment.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r--  kernel/posix-timers.c | 1121 ----------------------------------
1 file changed, 0 insertions(+), 1121 deletions(-)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
deleted file mode 100644
index 424c2d4265c9..000000000000
--- a/kernel/posix-timers.c
+++ /dev/null
@@ -1,1121 +0,0 @@
/*
 * linux/kernel/posix-timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *
 * Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */

/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
/*
 * Management arrays for POSIX timers. Timers are kept in a static hash
 * table with 512 entries.
 * Timer IDs are allocated by a local routine that selects the proper hash
 * head by a key constructed from the current->signal address and a
 * per-signal-struct counter. This keeps timer IDs unique per process, but
 * they can now intersect between processes.
 */

/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values. The check below emits a compile-time error if this
 * assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bits with other SIGEV values!"
#endif

/*
 * parisc wants ENOTSUP instead of EOPNOTSUPP
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif

/*
 * The timer ID is turned into a timer address by posix_timer_by_id().
 * Verifying a valid ID consists of:
 *
 * a) checking that posix_timer_by_id() returns other than NULL.
 * b) checking that the timer ID matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */

/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others. This structure defines the various
 *	    clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster. In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then if the
 *	    necessary code is written. The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines the possible functions to
 *	    handle the various clock operations.
 *
 *	    The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for
 *	    the timer. 2.) The list, it_lock, it_clock, it_id and
 *	    it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks. Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks) and others not. Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high-res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */

static struct k_clock posix_clocks[MAX_CLOCKS];

/*
 * These are defined below.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
			 struct timespec __user *rmtp);
static int common_timer_create(struct k_itimer *new_timer);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
			    struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})

static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}

/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_real_ts(tp);
	return 0;
}

/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec *tp)
{
	return do_sys_settimeofday(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}

/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
	ktime_get_ts(tp);
	return 0;
}

/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
{
	getrawmonotonic(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
{
	*tp = current_kernel_time();
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec *tp)
{
	*tp = get_monotonic_coarse();
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
{
	*tp = ktime_to_timespec(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
{
	get_monotonic_boottime(tp);
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
{
	timekeeping_clocktai(tp);
	return 0;
}

/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	struct k_clock clock_tai = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_boottime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_boottime,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};

	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}

__initcall(init_posix_timers);
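
/*
 * A minimal userspace sketch (not part of this file) of the asymmetry
 * set up above: every registered clock supports clock_gettime(), but a
 * clock registered without a timer_create hook, e.g. clock_monotonic_raw,
 * makes timer_create() fail with EOPNOTSUPP. Hypothetical demo code;
 * link with -lrt on older glibc.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	timer_t tid;

	if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) == 0)
		printf("raw: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	/* clock_monotonic_raw above has no .timer_create hook. */
	if (timer_create(CLOCK_MONOTONIC_RAW, NULL, &tid) < 0)
		printf("timer_create: %s\n", strerror(errno));
	return 0;
}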

static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}

/*
 * This function is exported for use by the signal delivery code. It is
 * called just prior to the info block being released and passes that
 * block to us. Its function is to update the overrun entry AND to
 * restart the timer. It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->do_schedule_next_timer().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls do_schedule_next_timer().
	 * We re-queue ->sigq and drop ->it_lock().
	 * do_schedule_next_timer() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not what we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);

/*
 * This function gets called when a POSIX.1b interval timer expires. It
 * is used as a callback from the kernel internal timer. The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because of SIG_IGN, so we
		 * will not get a callback to restart it AND it should
		 * be restarted.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffy expire every jiffy to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which also solves another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}

static struct pid *good_sigevent(sigevent_t *event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
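
/*
 * Userspace sketch (not part of this file) of a sigevent that passes
 * the good_sigevent() checks above: SIGEV_THREAD_ID must carry a valid
 * signal number and name a thread in the caller's own thread group.
 * Assumes glibc's internal _sigev_un._tid layout for the thread-ID
 * field; hypothetical demo code.
 */
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef sigev_notify_thread_id
#define sigev_notify_thread_id _sigev_un._tid	/* glibc-internal name */
#endif

static int make_thread_timer(timer_t *tid)
{
	struct sigevent sev;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_notify_thread_id = syscall(SYS_gettid);	/* this thread */
	return timer_create(CLOCK_MONOTONIC, &sev, tid);
}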

void posix_timers_register_clock(const clockid_t clock_id,
				 struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS) {
		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
		       clock_id);
		return;
	}

	if (!new_clock->clock_get) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
		       clock_id);
		return;
	}
	if (!new_clock->clock_getres) {
		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
		       clock_id);
		return;
	}

	posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(posix_timers_register_clock);

static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;

		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}

static struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
		return NULL;
	return &posix_clocks[id];
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}

/* Create a POSIX.1b interval timer. */

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time. Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
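
/*
 * Userspace sketch (not part of this file): with a NULL sigevent,
 * timer_create() takes the else-branch above and defaults to
 * SIGEV_SIGNAL/SIGALRM for the whole process, with sival_int set to
 * the new timer ID. Hypothetical demo; error handling trimmed, link
 * with -lrt on older glibc.
 */
#include <signal.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void on_alarm(int sig, siginfo_t *si, void *uctx)
{
	/* si->si_code == SI_TIMER; si->si_value.sival_int is the timer ID */
	(void)sig; (void)si; (void)uctx;
}

int main(void)
{
	struct sigaction sa;
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },	/* first expiry */
		.it_interval = { .tv_sec = 1 },	/* then every second */
	};
	timer_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = on_alarm;
	sigaction(SIGALRM, &sa, NULL);

	timer_create(CLOCK_REALTIME, NULL, &tid);	/* NULL: SIGALRM default */
	timer_settime(tid, 0, &its, NULL);
	pause();
	return 0;
}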

/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us. The
 * removal is done under the hash spinlock, so we use that here to
 * bridge the find to the timer lock. To avoid a deadlock, the timer id
 * MUST be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}

/*
 * Get the time remaining on a POSIX.1b interval timer. This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here. First there is the case
 * of a timer that has a requeue pending. These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here. Otherwise
 * it is the same as a requeue pending timer with respect to what we
 * should report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer, move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
			(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);
	/* Return 0 only when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0 when
		 * it is expired!
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting);

	unlock_timer(timr, flags);

	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
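
/*
 * Userspace sketch (not part of this file) of the SIGEV_NONE read-back
 * semantics implemented in common_timer_get() above: the timer never
 * queues a signal, yet timer_gettime() keeps reporting the remaining
 * time, and an expired one-shot reads back as an all-zero it_value.
 * Hypothetical demo code; error handling trimmed.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { .it_value = { .tv_sec = 2 } };
	struct itimerspec cur;
	timer_t tid;

	timer_create(CLOCK_MONOTONIC, &sev, &tid);
	timer_settime(tid, 0, &its, NULL);

	sleep(1);
	timer_gettime(tid, &cur);	/* roughly one second left */
	sleep(2);
	timer_gettime(tid, &cur);	/* expired: it_value is 0/0 */
	printf("%ld.%09ld\n", (long)cur.it_value.tv_sec, cur.it_value.tv_nsec);

	timer_delete(tid);
	return 0;
}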

/*
 * Get the number of overruns of a POSIX.1b interval timer. This is to
 * be the overrun of the timer last delivered. At the same time we are
 * accumulating overruns on the next timer. The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()). So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
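
/*
 * Userspace sketch (not part of this file) of the frozen-overrun
 * semantics described above: after a timer signal is delivered,
 * timer_getoverrun() reports how many further expirations were merged
 * into that one queued signal. Hypothetical fragment; assumes the
 * timer was created with sigev_value.sival_ptr pointing at its timer_t.
 */
#include <signal.h>
#include <time.h>

static volatile sig_atomic_t merged_expirations;

static void on_tick(int sig, siginfo_t *si, void *uctx)
{
	timer_t *tidp = si->si_value.sival_ptr;

	/* The overrun count was frozen when this signal was delivered. */
	merged_expirations = timer_getoverrun(*tidp);
	(void)sig; (void)uctx;
}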

/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval.tv64 = 0;
	/*
	 * Careful here. On SMP systems we could be in the "fire" routine,
	 * which will be spinning as we hold the lock. But this is ONLY an
	 * SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	/* Convert interval */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued! See common_timer_get */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	/* We already got the old time... */
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}

static int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}

/*
 * Delete a timer owned by the process; used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}

/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t, rmtp);
}
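
/*
 * Userspace sketch (not part of this file) of the TIMER_ABSTIME path
 * handled by common_nsleep() above: clock_nanosleep() returns the error
 * number instead of setting errno, and with an absolute deadline an
 * EINTR retry needs no remaining-time bookkeeping. Hypothetical code.
 */
#include <errno.h>
#include <time.h>

static int sleep_until(const struct timespec *deadline)
{
	int err;

	do {
		err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				      deadline, NULL);
	} while (err == EINTR);	/* deadline is absolute: just retry */

	return err;
}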

/*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct k_clock *kc = clockid_to_kclock(which_clock);

	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}