about summary refs log tree commit diff stats
path: root/net/unix/af_unix.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--	net/unix/af_unix.c	24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58d6aba..955ec152cb71 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -441,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 		if (state == TCP_LISTEN)
 			unix_release_sock(skb->sk, 1);
 		/* passed fds are erased in the kfree_skb hook */
+		UNIXCB(skb).consumed = skb->len;
 		kfree_skb(skb);
 	}
 
@@ -1799,6 +1800,7 @@ alloc_skb:
 		 * this - does no harm
 		 */
 		consume_skb(newskb);
+		newskb = NULL;
 	}
 
 	if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,8 +1813,11 @@ alloc_skb:
 	skb->truesize += size;
 	atomic_add(size, &sk->sk_wmem_alloc);
 
-	if (newskb)
+	if (newskb) {
+		spin_lock(&other->sk_receive_queue.lock);
 		__skb_queue_tail(&other->sk_receive_queue, newskb);
+		spin_unlock(&other->sk_receive_queue.lock);
+	}
 
 	unix_state_unlock(other);
 	mutex_unlock(&unix_sk(other)->readlock);
@@ -2072,6 +2077,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 
 	do {
 		int chunk;
+		bool drop_skb;
 		struct sk_buff *skb, *last;
 
 		unix_state_lock(sk);
@@ -2152,7 +2158,11 @@ unlock:
 		}
 
 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+		skb_get(skb);
 		chunk = state->recv_actor(skb, skip, chunk, state);
+		drop_skb = !unix_skb_len(skb);
+		/* skb is only safe to use if !drop_skb */
+		consume_skb(skb);
 		if (chunk < 0) {
 			if (copied == 0)
 				copied = -EFAULT;
@@ -2161,6 +2171,18 @@ unlock:
 		copied += chunk;
 		size -= chunk;
 
+		if (drop_skb) {
+			/* the skb was touched by a concurrent reader;
+			 * we should not expect anything from this skb
+			 * anymore and assume it invalid - we can be
+			 * sure it was dropped from the socket queue
+			 *
+			 * let's report a short read
+			 */
+			err = 0;
+			break;
+		}
+
 		/* Mark read part of skb as used */
 		if (!(flags & MSG_PEEK)) {
 			UNIXCB(skb).consumed += chunk;