diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-19 20:11:10 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-19 20:11:10 -0400 |
| commit | 164d44fd92e79d5bce54d0d62df9f856f7b23925 (patch) | |
| tree | 9f21607849b7e684b255578ffdf41951bc31787e /ipc | |
| parent | 5bfec46baa3a752393433b8d89d3b2c70820f61d (diff) | |
| parent | d7e81c269db899b800e0963dc4aceece1f82a680 (diff) | |
Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
clocksource: Add clocksource_register_hz/khz interface
posix-cpu-timers: Optimize run_posix_cpu_timers()
time: Remove xtime_cache
mqueue: Convert message queue timeout to use hrtimers
hrtimers: Provide schedule_hrtimeout for CLOCK_REALTIME
timers: Introduce the concept of timer slack for legacy timers
ntp: Remove tickadj
ntp: Make time_adjust static
time: Add xtime, wall_to_monotonic to feature-removal-schedule
timer: Try to survive timer callback preempt_count leak
timer: Split out timer function call
timer: Print function name for timer callbacks modifying preemption count
time: Clean up warp_clock()
cpu-timers: Avoid iterating over all threads in fastpath_timer_check()
cpu-timers: Change SIGEV_NONE timer implementation
cpu-timers: Return correct previous timer reload value
cpu-timers: Cleanup arm_timer()
cpu-timers: Simplify RLIMIT_CPU handling
Diffstat (limited to 'ipc')
| -rw-r--r-- | ipc/mqueue.c | 74 |
1 file changed, 25 insertions(+), 49 deletions(-)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 59a009dc54a8..5108232f93d4 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
| @@ -429,7 +429,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, | |||
| 429 | * sr: SEND or RECV | 429 | * sr: SEND or RECV |
| 430 | */ | 430 | */ |
| 431 | static int wq_sleep(struct mqueue_inode_info *info, int sr, | 431 | static int wq_sleep(struct mqueue_inode_info *info, int sr, |
| 432 | long timeout, struct ext_wait_queue *ewp) | 432 | ktime_t *timeout, struct ext_wait_queue *ewp) |
| 433 | { | 433 | { |
| 434 | int retval; | 434 | int retval; |
| 435 | signed long time; | 435 | signed long time; |
| @@ -440,7 +440,8 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, | |||
| 440 | set_current_state(TASK_INTERRUPTIBLE); | 440 | set_current_state(TASK_INTERRUPTIBLE); |
| 441 | 441 | ||
| 442 | spin_unlock(&info->lock); | 442 | spin_unlock(&info->lock); |
| 443 | time = schedule_timeout(timeout); | 443 | time = schedule_hrtimeout_range_clock(timeout, |
| 444 | HRTIMER_MODE_ABS, 0, CLOCK_REALTIME); | ||
| 444 | 445 | ||
| 445 | while (ewp->state == STATE_PENDING) | 446 | while (ewp->state == STATE_PENDING) |
| 446 | cpu_relax(); | 447 | cpu_relax(); |
| @@ -552,31 +553,16 @@ static void __do_notify(struct mqueue_inode_info *info) | |||
| 552 | wake_up(&info->wait_q); | 553 | wake_up(&info->wait_q); |
| 553 | } | 554 | } |
| 554 | 555 | ||
| 555 | static long prepare_timeout(struct timespec *p) | 556 | static int prepare_timeout(const struct timespec __user *u_abs_timeout, |
| 557 | ktime_t *expires, struct timespec *ts) | ||
| 556 | { | 558 | { |
| 557 | struct timespec nowts; | 559 | if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) |
| 558 | long timeout; | 560 | return -EFAULT; |
| 559 | 561 | if (!timespec_valid(ts)) | |
| 560 | if (p) { | 562 | return -EINVAL; |
| 561 | if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0 | ||
| 562 | || p->tv_nsec >= NSEC_PER_SEC)) | ||
| 563 | return -EINVAL; | ||
| 564 | nowts = CURRENT_TIME; | ||
| 565 | /* first subtract as jiffies can't be too big */ | ||
| 566 | p->tv_sec -= nowts.tv_sec; | ||
| 567 | if (p->tv_nsec < nowts.tv_nsec) { | ||
| 568 | p->tv_nsec += NSEC_PER_SEC; | ||
| 569 | p->tv_sec--; | ||
| 570 | } | ||
| 571 | p->tv_nsec -= nowts.tv_nsec; | ||
| 572 | if (p->tv_sec < 0) | ||
| 573 | return 0; | ||
| 574 | |||
| 575 | timeout = timespec_to_jiffies(p) + 1; | ||
| 576 | } else | ||
| 577 | return MAX_SCHEDULE_TIMEOUT; | ||
| 578 | 563 | ||
| 579 | return timeout; | 564 | *expires = timespec_to_ktime(*ts); |
| 565 | return 0; | ||
| 580 | } | 566 | } |
| 581 | 567 | ||
| 582 | static void remove_notification(struct mqueue_inode_info *info) | 568 | static void remove_notification(struct mqueue_inode_info *info) |
| @@ -862,22 +848,21 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, | |||
| 862 | struct ext_wait_queue *receiver; | 848 | struct ext_wait_queue *receiver; |
| 863 | struct msg_msg *msg_ptr; | 849 | struct msg_msg *msg_ptr; |
| 864 | struct mqueue_inode_info *info; | 850 | struct mqueue_inode_info *info; |
| 865 | struct timespec ts, *p = NULL; | 851 | ktime_t expires, *timeout = NULL; |
| 866 | long timeout; | 852 | struct timespec ts; |
| 867 | int ret; | 853 | int ret; |
| 868 | 854 | ||
| 869 | if (u_abs_timeout) { | 855 | if (u_abs_timeout) { |
| 870 | if (copy_from_user(&ts, u_abs_timeout, | 856 | int res = prepare_timeout(u_abs_timeout, &expires, &ts); |
| 871 | sizeof(struct timespec))) | 857 | if (res) |
| 872 | return -EFAULT; | 858 | return res; |
| 873 | p = &ts; | 859 | timeout = &expires; |
| 874 | } | 860 | } |
| 875 | 861 | ||
| 876 | if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) | 862 | if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) |
| 877 | return -EINVAL; | 863 | return -EINVAL; |
| 878 | 864 | ||
| 879 | audit_mq_sendrecv(mqdes, msg_len, msg_prio, p); | 865 | audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); |
| 880 | timeout = prepare_timeout(p); | ||
| 881 | 866 | ||
| 882 | filp = fget(mqdes); | 867 | filp = fget(mqdes); |
| 883 | if (unlikely(!filp)) { | 868 | if (unlikely(!filp)) { |
| @@ -919,9 +904,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, | |||
| 919 | if (filp->f_flags & O_NONBLOCK) { | 904 | if (filp->f_flags & O_NONBLOCK) { |
| 920 | spin_unlock(&info->lock); | 905 | spin_unlock(&info->lock); |
| 921 | ret = -EAGAIN; | 906 | ret = -EAGAIN; |
| 922 | } else if (unlikely(timeout < 0)) { | ||
| 923 | spin_unlock(&info->lock); | ||
| 924 | ret = timeout; | ||
| 925 | } else { | 907 | } else { |
| 926 | wait.task = current; | 908 | wait.task = current; |
| 927 | wait.msg = (void *) msg_ptr; | 909 | wait.msg = (void *) msg_ptr; |
| @@ -954,24 +936,23 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, | |||
| 954 | size_t, msg_len, unsigned int __user *, u_msg_prio, | 936 | size_t, msg_len, unsigned int __user *, u_msg_prio, |
| 955 | const struct timespec __user *, u_abs_timeout) | 937 | const struct timespec __user *, u_abs_timeout) |
| 956 | { | 938 | { |
| 957 | long timeout; | ||
| 958 | ssize_t ret; | 939 | ssize_t ret; |
| 959 | struct msg_msg *msg_ptr; | 940 | struct msg_msg *msg_ptr; |
| 960 | struct file *filp; | 941 | struct file *filp; |
| 961 | struct inode *inode; | 942 | struct inode *inode; |
| 962 | struct mqueue_inode_info *info; | 943 | struct mqueue_inode_info *info; |
| 963 | struct ext_wait_queue wait; | 944 | struct ext_wait_queue wait; |
| 964 | struct timespec ts, *p = NULL; | 945 | ktime_t expires, *timeout = NULL; |
| 946 | struct timespec ts; | ||
| 965 | 947 | ||
| 966 | if (u_abs_timeout) { | 948 | if (u_abs_timeout) { |
| 967 | if (copy_from_user(&ts, u_abs_timeout, | 949 | int res = prepare_timeout(u_abs_timeout, &expires, &ts); |
| 968 | sizeof(struct timespec))) | 950 | if (res) |
| 969 | return -EFAULT; | 951 | return res; |
| 970 | p = &ts; | 952 | timeout = &expires; |
| 971 | } | 953 | } |
| 972 | 954 | ||
| 973 | audit_mq_sendrecv(mqdes, msg_len, 0, p); | 955 | audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL); |
| 974 | timeout = prepare_timeout(p); | ||
| 975 | 956 | ||
| 976 | filp = fget(mqdes); | 957 | filp = fget(mqdes); |
| 977 | if (unlikely(!filp)) { | 958 | if (unlikely(!filp)) { |
| @@ -1003,11 +984,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, | |||
| 1003 | if (filp->f_flags & O_NONBLOCK) { | 984 | if (filp->f_flags & O_NONBLOCK) { |
| 1004 | spin_unlock(&info->lock); | 985 | spin_unlock(&info->lock); |
| 1005 | ret = -EAGAIN; | 986 | ret = -EAGAIN; |
| 1006 | msg_ptr = NULL; | ||
| 1007 | } else if (unlikely(timeout < 0)) { | ||
| 1008 | spin_unlock(&info->lock); | ||
| 1009 | ret = timeout; | ||
| 1010 | msg_ptr = NULL; | ||
| 1011 | } else { | 987 | } else { |
| 1012 | wait.task = current; | 988 | wait.task = current; |
| 1013 | wait.state = STATE_NONE; | 989 | wait.state = STATE_NONE; |
