diff options
author | Davidlohr Bueso <davidlohr@hp.com> | 2014-01-27 20:07:10 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-28 00:02:40 -0500 |
commit | ffa571dafbaec0c74e374ce0ea7b4212b6cbc94c (patch) | |
tree | 21f19b4f4ddcf219c7b024c2cf7ffbac9d0a80b8 /ipc | |
parent | daf948c7d1a080041ae19aca07625efec670695a (diff) |
ipc,msg: document barriers
Both expunge_all() and pipelined_send() rely on both a nil msg value and
a full barrier to guarantee the correct ordering when waking up a task.
While its counterpart at the receiving end is well documented for the
lockless recv algorithm, we still need to document these specific
smp_mb() calls.
[akpm@linux-foundation.org: fix typo, per Mike]
[akpm@linux-foundation.org: more typos]
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc')
-rw-r--r-- | ipc/msg.c | 19 |
1 files changed, 17 insertions, 2 deletions
@@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res) | |||
253 | struct msg_receiver *msr, *t; | 253 | struct msg_receiver *msr, *t; |
254 | 254 | ||
255 | list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { | 255 | list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { |
256 | msr->r_msg = NULL; | 256 | msr->r_msg = NULL; /* initialize expunge ordering */ |
257 | wake_up_process(msr->r_tsk); | 257 | wake_up_process(msr->r_tsk); |
258 | /* | ||
259 | * Ensure that the wakeup is visible before setting r_msg as | ||
260 | * the receiving end depends on it: either spinning on a nil, | ||
261 | * or dealing with -EAGAIN cases. See lockless receive part 1 | ||
262 | * and 2 in do_msgrcv(). | ||
263 | */ | ||
258 | smp_mb(); | 264 | smp_mb(); |
259 | msr->r_msg = ERR_PTR(res); | 265 | msr->r_msg = ERR_PTR(res); |
260 | } | 266 | } |
@@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) | |||
638 | 644 | ||
639 | list_del(&msr->r_list); | 645 | list_del(&msr->r_list); |
640 | if (msr->r_maxsize < msg->m_ts) { | 646 | if (msr->r_maxsize < msg->m_ts) { |
647 | /* initialize pipelined send ordering */ | ||
641 | msr->r_msg = NULL; | 648 | msr->r_msg = NULL; |
642 | wake_up_process(msr->r_tsk); | 649 | wake_up_process(msr->r_tsk); |
643 | smp_mb(); | 650 | smp_mb(); /* see barrier comment below */ |
644 | msr->r_msg = ERR_PTR(-E2BIG); | 651 | msr->r_msg = ERR_PTR(-E2BIG); |
645 | } else { | 652 | } else { |
646 | msr->r_msg = NULL; | 653 | msr->r_msg = NULL; |
647 | msq->q_lrpid = task_pid_vnr(msr->r_tsk); | 654 | msq->q_lrpid = task_pid_vnr(msr->r_tsk); |
648 | msq->q_rtime = get_seconds(); | 655 | msq->q_rtime = get_seconds(); |
649 | wake_up_process(msr->r_tsk); | 656 | wake_up_process(msr->r_tsk); |
657 | /* | ||
658 | * Ensure that the wakeup is visible before | ||
659 | * setting r_msg, as the receiving end depends | ||
660 | * on it. See lockless receive part 1 and 2 in | ||
661 | * do_msgrcv(). | ||
662 | */ | ||
650 | smp_mb(); | 663 | smp_mb(); |
651 | msr->r_msg = msg; | 664 | msr->r_msg = msg; |
652 | 665 | ||
@@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) | |||
654 | } | 667 | } |
655 | } | 668 | } |
656 | } | 669 | } |
670 | |||
657 | return 0; | 671 | return 0; |
658 | } | 672 | } |
659 | 673 | ||
@@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, | |||
716 | goto out_unlock0; | 730 | goto out_unlock0; |
717 | } | 731 | } |
718 | 732 | ||
733 | /* enqueue the sender and prepare to block */ | ||
719 | ss_add(msq, &s); | 734 | ss_add(msq, &s); |
720 | 735 | ||
721 | if (!ipc_rcu_getref(msq)) { | 736 | if (!ipc_rcu_getref(msq)) { |