aboutsummaryrefslogtreecommitdiffstats
path: root/net/unix
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@parallels.com>2011-12-14 21:46:14 -0500
committerDavid S. Miller <davem@davemloft.net>2011-12-16 13:48:28 -0500
commit2aac7a2cb0d9d8c65fc7dde3e19e46b3e878d23d (patch)
treea2308c69d4a03fdb99596f336b7cdfeb6692c7a8 /net/unix
parentac02be8d96af9f66a4de86781ee9facc2dff99d4 (diff)
unix_diag: Pending connections IDs NLA
When establishing a unix connection on stream sockets the server end receives an skb with socket in its receive queue. Report who is waiting for these ends to be accepted for listening sockets via NLA. There's a locking issue with this -- the unix sk state lock is required to access the peer, and it is taken under the listening sk's queue lock. Strictly speaking the queue lock should be taken inside the state lock, but since in this case these two sockets are different it shouldn't lead to deadlock. Signed-off-by: Pavel Emelyanov <xemul@parallels.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/unix')
-rw-r--r--net/unix/diag.c39
1 files changed, 39 insertions, 0 deletions
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 0e0fda786afe..24c7a65d9cb1 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -63,6 +63,41 @@ rtattr_failure:
63 return -EMSGSIZE; 63 return -EMSGSIZE;
64} 64}
65 65
66static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
67{
68 struct sk_buff *skb;
69 u32 *buf;
70 int i;
71
72 if (sk->sk_state == TCP_LISTEN) {
73 spin_lock(&sk->sk_receive_queue.lock);
74 buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS, sk->sk_receive_queue.qlen);
75 i = 0;
76 skb_queue_walk(&sk->sk_receive_queue, skb) {
77 struct sock *req, *peer;
78
79 req = skb->sk;
80 /*
81 * The state lock is outer for the same sk's
82 * queue lock. With the other's queue locked it's
83 * OK to lock the state.
84 */
85 unix_state_lock_nested(req);
86 peer = unix_sk(req)->peer;
87 if (peer)
88 buf[i++] = sock_i_ino(peer);
89 unix_state_unlock(req);
90 }
91 spin_unlock(&sk->sk_receive_queue.lock);
92 }
93
94 return 0;
95
96rtattr_failure:
97 spin_unlock(&sk->sk_receive_queue.lock);
98 return -EMSGSIZE;
99}
100
66static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, 101static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
67 u32 pid, u32 seq, u32 flags, int sk_ino) 102 u32 pid, u32 seq, u32 flags, int sk_ino)
68{ 103{
@@ -93,6 +128,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
93 sk_diag_dump_peer(sk, skb)) 128 sk_diag_dump_peer(sk, skb))
94 goto nlmsg_failure; 129 goto nlmsg_failure;
95 130
131 if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
132 sk_diag_dump_icons(sk, skb))
133 goto nlmsg_failure;
134
96 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 135 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
97 return skb->len; 136 return skb->len;
98 137