Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r--	kernel/posix-timers.c	89
1 file changed, 45 insertions, 44 deletions
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5e79c662294b..052ec4d195c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
  * must supply functions here, even if the function just returns
  * ENOSYS. The standard POSIX timer management code assumes the
  * following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_process
+ * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
  * fields are not modified by timer code.
  *
  * At this time all functions EXCEPT clock_nanosleep can be
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
 	return 0;
 }
 
+static int no_timer_create(struct k_itimer *new_timer)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
 		.clock_getres = hrtimer_get_res,
 		.clock_get = posix_get_monotonic_raw,
 		.clock_set = do_posix_clock_nosettime,
+		.timer_create = no_timer_create,
 	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -313,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	int shared, ret;
+	struct task_struct *task;
+	int shared, ret = -1;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -327,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	rcu_read_lock();
+	task = pid_task(timr->it_pid, PIDTYPE_PID);
+	if (task) {
+		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+		ret = send_sigqueue(timr->sigq, task, shared);
+	}
+	rcu_read_unlock();
 	/* If we failed to send the signal the timer stops. */
 	return ret > 0;
 }
@@ -405,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	return ret;
 }
 
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
 
@@ -419,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
 	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
 		return NULL;
 
-	return rtn;
+	return task_pid(rtn);
 }
 
 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -458,20 +470,19 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 		idr_remove(&posix_timers_id, tmr->it_id);
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
+	put_pid(tmr->it_pid);
 	sigqueue_free(tmr->sigq);
 	kmem_cache_free(posix_timers_cache, tmr);
 }
 
 /* Create a POSIX.1b interval timer. */
 
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
-		 struct sigevent __user *timer_event_spec,
-		 timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+		struct sigevent __user *, timer_event_spec,
+		timer_t __user *, created_timer_id)
 {
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
-	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -525,11 +536,9 @@ sys_timer_create(const clockid_t which_clock,
 			goto out;
 		}
 		rcu_read_lock();
-		process = good_sigevent(&event);
-		if (process)
-			get_task_struct(process);
+		new_timer->it_pid = get_pid(good_sigevent(&event));
 		rcu_read_unlock();
-		if (!process) {
+		if (!new_timer->it_pid) {
 			error = -EINVAL;
 			goto out;
 		}
@@ -537,8 +546,7 @@ sys_timer_create(const clockid_t which_clock,
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
-		process = current->group_leader;
-		get_task_struct(process);
+		new_timer->it_pid = get_pid(task_tgid(current));
 	}
 
 	new_timer->it_sigev_notify = event.sigev_notify;
@@ -548,7 +556,7 @@ sys_timer_create(const clockid_t which_clock,
 	new_timer->sigq->info.si_code = SI_TIMER;
 
 	spin_lock_irq(&current->sighand->siglock);
-	new_timer->it_process = process;
+	new_timer->it_signal = current->signal;
 	list_add(&new_timer->list, &current->signal->posix_timers);
 	spin_unlock_irq(&current->sighand->siglock);
 
@@ -583,8 +591,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-		if (timr->it_process &&
-		    same_thread_group(timr->it_process, current)) {
+		if (timr->it_signal == current->signal) {
 			spin_unlock(&idr_lock);
 			return timr;
 		}
@@ -653,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		struct itimerspec __user *, setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec cur_setting;
@@ -683,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
  * the call back to do_schedule_next_timer(). So all we need to do is
  * to pick up the frozen overrun.
  */
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 {
 	struct k_itimer *timr;
 	int overrun;
@@ -752,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags,
 }
 
 /* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
-		  const struct itimerspec __user *new_setting,
-		  struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+		const struct itimerspec __user *, new_setting,
+		struct itimerspec __user *, old_setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec new_spec, old_spec;
@@ -808,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer)
 }
 
 /* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
 	struct k_itimer *timer;
 	unsigned long flags;
@@ -831,8 +835,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
@@ -858,8 +861,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
@@ -897,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
-				  const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+		const struct timespec __user *, tp)
 {
 	struct timespec new_tp;
 
@@ -910,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock,
 	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
 }
 
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+		struct timespec __user *,tp)
 {
 	struct timespec kernel_tp;
 	int error;
@@ -927,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
 
 }
 
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+		struct timespec __user *, tp)
 {
 	struct timespec rtn_tp;
 	int error;
@@ -957,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags,
 				 which_clock);
 }
 
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
-		    const struct timespec __user *rqtp,
-		    struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+		const struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec t;
 
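
The syscalls converted to SYSCALL_DEFINEx() above are the kernel side of the POSIX per-process timer API. For reference only (this program is not part of the patch, and its names are illustrative), a minimal userspace sketch that exercises timer_create(), timer_settime() and timer_delete() through the SIGEV_SIGNAL path handled by posix_timer_event() could look like this; on older glibc it must be linked with -lrt.

/*
 * Minimal userspace sketch (not part of the patch above; assumes glibc):
 * arms a one-shot 500 ms CLOCK_MONOTONIC timer that delivers SIGRTMIN to
 * the process, i.e. the SIGEV_SIGNAL path that reaches posix_timer_event().
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = { 0 };
	struct itimerspec its = { 0 };
	sigset_t set;
	siginfo_t si;
	timer_t timerid;

	/* Block SIGRTMIN and collect it synchronously instead of via a handler. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sev.sigev_notify = SIGEV_SIGNAL;		/* signal the whole process */
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_int = 42;			/* comes back in si_value */
	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) < 0) {	/* sys_timer_create() */
		perror("timer_create");
		exit(1);
	}

	its.it_value.tv_nsec = 500 * 1000 * 1000;	/* one-shot, fires after 500 ms */
	if (timer_settime(timerid, 0, &its, NULL) < 0) {	/* sys_timer_settime() */
		perror("timer_settime");
		exit(1);
	}

	if (sigwaitinfo(&set, &si) == SIGRTMIN)
		printf("timer fired, sival_int=%d\n", si.si_value.sival_int);

	timer_delete(timerid);				/* sys_timer_delete() */
	return 0;
}

Compiled with something like "gcc -Wall demo.c -lrt", it should print the sival_int value passed at timer_create() time after roughly half a second.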