diff options
Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r-- | kernel/posix-timers.c | 83 |
1 files changed, 39 insertions, 44 deletions
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index a140e44eebba..052ec4d195c7 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock); | |||
116 | * must supply functions here, even if the function just returns | 116 | * must supply functions here, even if the function just returns |
117 | * ENOSYS. The standard POSIX timer management code assumes the | 117 | * ENOSYS. The standard POSIX timer management code assumes the |
118 | * following: 1.) The k_itimer struct (sched.h) is used for the | 118 | * following: 1.) The k_itimer struct (sched.h) is used for the |
119 | * timer. 2.) The list, it_lock, it_clock, it_id and it_process | 119 | * timer. 2.) The list, it_lock, it_clock, it_id and it_pid |
120 | * fields are not modified by timer code. | 120 | * fields are not modified by timer code. |
121 | * | 121 | * |
122 | * At this time all functions EXCEPT clock_nanosleep can be | 122 | * At this time all functions EXCEPT clock_nanosleep can be |
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info) | |||
319 | 319 | ||
320 | int posix_timer_event(struct k_itimer *timr, int si_private) | 320 | int posix_timer_event(struct k_itimer *timr, int si_private) |
321 | { | 321 | { |
322 | int shared, ret; | 322 | struct task_struct *task; |
323 | int shared, ret = -1; | ||
323 | /* | 324 | /* |
324 | * FIXME: if ->sigq is queued we can race with | 325 | * FIXME: if ->sigq is queued we can race with |
325 | * dequeue_signal()->do_schedule_next_timer(). | 326 | * dequeue_signal()->do_schedule_next_timer(). |
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private) | |||
333 | */ | 334 | */ |
334 | timr->sigq->info.si_sys_private = si_private; | 335 | timr->sigq->info.si_sys_private = si_private; |
335 | 336 | ||
336 | shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); | 337 | rcu_read_lock(); |
337 | ret = send_sigqueue(timr->sigq, timr->it_process, shared); | 338 | task = pid_task(timr->it_pid, PIDTYPE_PID); |
339 | if (task) { | ||
340 | shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); | ||
341 | ret = send_sigqueue(timr->sigq, task, shared); | ||
342 | } | ||
343 | rcu_read_unlock(); | ||
338 | /* If we failed to send the signal the timer stops. */ | 344 | /* If we failed to send the signal the timer stops. */ |
339 | return ret > 0; | 345 | return ret > 0; |
340 | } | 346 | } |
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) | |||
411 | return ret; | 417 | return ret; |
412 | } | 418 | } |
413 | 419 | ||
414 | static struct task_struct * good_sigevent(sigevent_t * event) | 420 | static struct pid *good_sigevent(sigevent_t * event) |
415 | { | 421 | { |
416 | struct task_struct *rtn = current->group_leader; | 422 | struct task_struct *rtn = current->group_leader; |
417 | 423 | ||
@@ -425,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event) | |||
425 | ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) | 431 | ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) |
426 | return NULL; | 432 | return NULL; |
427 | 433 | ||
428 | return rtn; | 434 | return task_pid(rtn); |
429 | } | 435 | } |
430 | 436 | ||
431 | void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) | 437 | void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) |
@@ -464,20 +470,19 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) | |||
464 | idr_remove(&posix_timers_id, tmr->it_id); | 470 | idr_remove(&posix_timers_id, tmr->it_id); |
465 | spin_unlock_irqrestore(&idr_lock, flags); | 471 | spin_unlock_irqrestore(&idr_lock, flags); |
466 | } | 472 | } |
473 | put_pid(tmr->it_pid); | ||
467 | sigqueue_free(tmr->sigq); | 474 | sigqueue_free(tmr->sigq); |
468 | kmem_cache_free(posix_timers_cache, tmr); | 475 | kmem_cache_free(posix_timers_cache, tmr); |
469 | } | 476 | } |
470 | 477 | ||
471 | /* Create a POSIX.1b interval timer. */ | 478 | /* Create a POSIX.1b interval timer. */ |
472 | 479 | ||
473 | asmlinkage long | 480 | SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, |
474 | sys_timer_create(const clockid_t which_clock, | 481 | struct sigevent __user *, timer_event_spec, |
475 | struct sigevent __user *timer_event_spec, | 482 | timer_t __user *, created_timer_id) |
476 | timer_t __user * created_timer_id) | ||
477 | { | 483 | { |
478 | struct k_itimer *new_timer; | 484 | struct k_itimer *new_timer; |
479 | int error, new_timer_id; | 485 | int error, new_timer_id; |
480 | struct task_struct *process; | ||
481 | sigevent_t event; | 486 | sigevent_t event; |
482 | int it_id_set = IT_ID_NOT_SET; | 487 | int it_id_set = IT_ID_NOT_SET; |
483 | 488 | ||
@@ -531,11 +536,9 @@ sys_timer_create(const clockid_t which_clock, | |||
531 | goto out; | 536 | goto out; |
532 | } | 537 | } |
533 | rcu_read_lock(); | 538 | rcu_read_lock(); |
534 | process = good_sigevent(&event); | 539 | new_timer->it_pid = get_pid(good_sigevent(&event)); |
535 | if (process) | ||
536 | get_task_struct(process); | ||
537 | rcu_read_unlock(); | 540 | rcu_read_unlock(); |
538 | if (!process) { | 541 | if (!new_timer->it_pid) { |
539 | error = -EINVAL; | 542 | error = -EINVAL; |
540 | goto out; | 543 | goto out; |
541 | } | 544 | } |
@@ -543,8 +546,7 @@ sys_timer_create(const clockid_t which_clock, | |||
543 | event.sigev_notify = SIGEV_SIGNAL; | 546 | event.sigev_notify = SIGEV_SIGNAL; |
544 | event.sigev_signo = SIGALRM; | 547 | event.sigev_signo = SIGALRM; |
545 | event.sigev_value.sival_int = new_timer->it_id; | 548 | event.sigev_value.sival_int = new_timer->it_id; |
546 | process = current->group_leader; | 549 | new_timer->it_pid = get_pid(task_tgid(current)); |
547 | get_task_struct(process); | ||
548 | } | 550 | } |
549 | 551 | ||
550 | new_timer->it_sigev_notify = event.sigev_notify; | 552 | new_timer->it_sigev_notify = event.sigev_notify; |
@@ -554,7 +556,7 @@ sys_timer_create(const clockid_t which_clock, | |||
554 | new_timer->sigq->info.si_code = SI_TIMER; | 556 | new_timer->sigq->info.si_code = SI_TIMER; |
555 | 557 | ||
556 | spin_lock_irq(&current->sighand->siglock); | 558 | spin_lock_irq(&current->sighand->siglock); |
557 | new_timer->it_process = process; | 559 | new_timer->it_signal = current->signal; |
558 | list_add(&new_timer->list, &current->signal->posix_timers); | 560 | list_add(&new_timer->list, &current->signal->posix_timers); |
559 | spin_unlock_irq(&current->sighand->siglock); | 561 | spin_unlock_irq(&current->sighand->siglock); |
560 | 562 | ||
@@ -589,8 +591,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags) | |||
589 | timr = idr_find(&posix_timers_id, (int)timer_id); | 591 | timr = idr_find(&posix_timers_id, (int)timer_id); |
590 | if (timr) { | 592 | if (timr) { |
591 | spin_lock(&timr->it_lock); | 593 | spin_lock(&timr->it_lock); |
592 | if (timr->it_process && | 594 | if (timr->it_signal == current->signal) { |
593 | same_thread_group(timr->it_process, current)) { | ||
594 | spin_unlock(&idr_lock); | 595 | spin_unlock(&idr_lock); |
595 | return timr; | 596 | return timr; |
596 | } | 597 | } |
@@ -659,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) | |||
659 | } | 660 | } |
660 | 661 | ||
661 | /* Get the time remaining on a POSIX.1b interval timer. */ | 662 | /* Get the time remaining on a POSIX.1b interval timer. */ |
662 | asmlinkage long | 663 | SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, |
663 | sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting) | 664 | struct itimerspec __user *, setting) |
664 | { | 665 | { |
665 | struct k_itimer *timr; | 666 | struct k_itimer *timr; |
666 | struct itimerspec cur_setting; | 667 | struct itimerspec cur_setting; |
@@ -689,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting) | |||
689 | * the call back to do_schedule_next_timer(). So all we need to do is | 690 | * the call back to do_schedule_next_timer(). So all we need to do is |
690 | * to pick up the frozen overrun. | 691 | * to pick up the frozen overrun. |
691 | */ | 692 | */ |
692 | asmlinkage long | 693 | SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) |
693 | sys_timer_getoverrun(timer_t timer_id) | ||
694 | { | 694 | { |
695 | struct k_itimer *timr; | 695 | struct k_itimer *timr; |
696 | int overrun; | 696 | int overrun; |
@@ -758,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags, | |||
758 | } | 758 | } |
759 | 759 | ||
760 | /* Set a POSIX.1b interval timer */ | 760 | /* Set a POSIX.1b interval timer */ |
761 | asmlinkage long | 761 | SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, |
762 | sys_timer_settime(timer_t timer_id, int flags, | 762 | const struct itimerspec __user *, new_setting, |
763 | const struct itimerspec __user *new_setting, | 763 | struct itimerspec __user *, old_setting) |
764 | struct itimerspec __user *old_setting) | ||
765 | { | 764 | { |
766 | struct k_itimer *timr; | 765 | struct k_itimer *timr; |
767 | struct itimerspec new_spec, old_spec; | 766 | struct itimerspec new_spec, old_spec; |
@@ -814,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer) | |||
814 | } | 813 | } |
815 | 814 | ||
816 | /* Delete a POSIX.1b interval timer. */ | 815 | /* Delete a POSIX.1b interval timer. */ |
817 | asmlinkage long | 816 | SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) |
818 | sys_timer_delete(timer_t timer_id) | ||
819 | { | 817 | { |
820 | struct k_itimer *timer; | 818 | struct k_itimer *timer; |
821 | unsigned long flags; | 819 | unsigned long flags; |
@@ -837,8 +835,7 @@ retry_delete: | |||
837 | * This keeps any tasks waiting on the spin lock from thinking | 835 | * This keeps any tasks waiting on the spin lock from thinking |
838 | * they got something (see the lock code above). | 836 | * they got something (see the lock code above). |
839 | */ | 837 | */ |
840 | put_task_struct(timer->it_process); | 838 | timer->it_signal = NULL; |
841 | timer->it_process = NULL; | ||
842 | 839 | ||
843 | unlock_timer(timer, flags); | 840 | unlock_timer(timer, flags); |
844 | release_posix_timer(timer, IT_ID_SET); | 841 | release_posix_timer(timer, IT_ID_SET); |
@@ -864,8 +861,7 @@ retry_delete: | |||
864 | * This keeps any tasks waiting on the spin lock from thinking | 861 | * This keeps any tasks waiting on the spin lock from thinking |
865 | * they got something (see the lock code above). | 862 | * they got something (see the lock code above). |
866 | */ | 863 | */ |
867 | put_task_struct(timer->it_process); | 864 | timer->it_signal = NULL; |
868 | timer->it_process = NULL; | ||
869 | 865 | ||
870 | unlock_timer(timer, flags); | 866 | unlock_timer(timer, flags); |
871 | release_posix_timer(timer, IT_ID_SET); | 867 | release_posix_timer(timer, IT_ID_SET); |
@@ -903,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags, | |||
903 | } | 899 | } |
904 | EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep); | 900 | EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep); |
905 | 901 | ||
906 | asmlinkage long sys_clock_settime(const clockid_t which_clock, | 902 | SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, |
907 | const struct timespec __user *tp) | 903 | const struct timespec __user *, tp) |
908 | { | 904 | { |
909 | struct timespec new_tp; | 905 | struct timespec new_tp; |
910 | 906 | ||
@@ -916,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock, | |||
916 | return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp)); | 912 | return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp)); |
917 | } | 913 | } |
918 | 914 | ||
919 | asmlinkage long | 915 | SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, |
920 | sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) | 916 | struct timespec __user *,tp) |
921 | { | 917 | { |
922 | struct timespec kernel_tp; | 918 | struct timespec kernel_tp; |
923 | int error; | 919 | int error; |
@@ -933,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) | |||
933 | 929 | ||
934 | } | 930 | } |
935 | 931 | ||
936 | asmlinkage long | 932 | SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, |
937 | sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp) | 933 | struct timespec __user *, tp) |
938 | { | 934 | { |
939 | struct timespec rtn_tp; | 935 | struct timespec rtn_tp; |
940 | int error; | 936 | int error; |
@@ -963,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags, | |||
963 | which_clock); | 959 | which_clock); |
964 | } | 960 | } |
965 | 961 | ||
966 | asmlinkage long | 962 | SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, |
967 | sys_clock_nanosleep(const clockid_t which_clock, int flags, | 963 | const struct timespec __user *, rqtp, |
968 | const struct timespec __user *rqtp, | 964 | struct timespec __user *, rmtp) |
969 | struct timespec __user *rmtp) | ||
970 | { | 965 | { |
971 | struct timespec t; | 966 | struct timespec t; |
972 | 967 | ||