diff options
| author | Manfred Spraul <manfred@colorfullife.com> | 2013-09-03 10:00:08 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-10-18 10:45:47 -0400 |
| commit | 11ce33923261281f406a99ee345ffc5f53aec2c8 (patch) | |
| tree | ce667b762a9510a5869e6dbd82a211433e5fb80b /ipc | |
| parent | b56e88e25e1d576619343e97fdb6cbe11035cf6d (diff) | |
ipc/msg.c: Fix lost wakeup in msgsnd().
commit bebcb928c820d0ee83aca4b192adc195e43e66a2 upstream.
The check if the queue is full and adding current to the wait queue of
pending msgsnd() operations (ss_add()) must be atomic.
Otherwise:
- the thread that performs msgsnd() finds a full queue and decides to
sleep.
- the thread that performs msgrcv() first reads all messages from the
queue and then sleeps, because the queue is empty.
- the msgrcv() calls do not perform any wakeups, because the msgsnd()
task has not yet called ss_add().
- then the msgsnd()-thread first calls ss_add() and then sleeps.
Net result: msgsnd() and msgrcv() both sleep forever.
Observed with msgctl08 from ltp with a preemptible kernel.
Fix: Call ipc_lock_object() before performing the check.
The patch also moves security_msg_queue_msgsnd() under ipc_lock_object:
- msgctl(IPC_SET) explicitly mentions that it tries to expunge any
pending operations that are not allowed anymore with the new
permissions. If security_msg_queue_msgsnd() is called without locks,
then there might be races.
- it makes the patch much simpler.
Reported-and-tested-by: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'ipc')
| -rw-r--r-- | ipc/msg.c | 12 |
1 files changed, 5 insertions, 7 deletions
| @@ -680,16 +680,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
| 680 | goto out_unlock1; | 680 | goto out_unlock1; |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | ipc_lock_object(&msq->q_perm); | ||
| 684 | |||
| 683 | for (;;) { | 685 | for (;;) { |
| 684 | struct msg_sender s; | 686 | struct msg_sender s; |
| 685 | 687 | ||
| 686 | err = -EACCES; | 688 | err = -EACCES; |
| 687 | if (ipcperms(ns, &msq->q_perm, S_IWUGO)) | 689 | if (ipcperms(ns, &msq->q_perm, S_IWUGO)) |
| 688 | goto out_unlock1; | 690 | goto out_unlock0; |
| 689 | 691 | ||
| 690 | err = security_msg_queue_msgsnd(msq, msg, msgflg); | 692 | err = security_msg_queue_msgsnd(msq, msg, msgflg); |
| 691 | if (err) | 693 | if (err) |
| 692 | goto out_unlock1; | 694 | goto out_unlock0; |
| 693 | 695 | ||
| 694 | if (msgsz + msq->q_cbytes <= msq->q_qbytes && | 696 | if (msgsz + msq->q_cbytes <= msq->q_qbytes && |
| 695 | 1 + msq->q_qnum <= msq->q_qbytes) { | 697 | 1 + msq->q_qnum <= msq->q_qbytes) { |
| @@ -699,10 +701,9 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
| 699 | /* queue full, wait: */ | 701 | /* queue full, wait: */ |
| 700 | if (msgflg & IPC_NOWAIT) { | 702 | if (msgflg & IPC_NOWAIT) { |
| 701 | err = -EAGAIN; | 703 | err = -EAGAIN; |
| 702 | goto out_unlock1; | 704 | goto out_unlock0; |
| 703 | } | 705 | } |
| 704 | 706 | ||
| 705 | ipc_lock_object(&msq->q_perm); | ||
| 706 | ss_add(msq, &s); | 707 | ss_add(msq, &s); |
| 707 | 708 | ||
| 708 | if (!ipc_rcu_getref(msq)) { | 709 | if (!ipc_rcu_getref(msq)) { |
| @@ -730,10 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
| 730 | goto out_unlock0; | 731 | goto out_unlock0; |
| 731 | } | 732 | } |
| 732 | 733 | ||
| 733 | ipc_unlock_object(&msq->q_perm); | ||
| 734 | } | 734 | } |
| 735 | |||
| 736 | ipc_lock_object(&msq->q_perm); | ||
| 737 | msq->q_lspid = task_tgid_vnr(current); | 735 | msq->q_lspid = task_tgid_vnr(current); |
| 738 | msq->q_stime = get_seconds(); | 736 | msq->q_stime = get_seconds(); |
| 739 | 737 | ||
