aboutsummaryrefslogtreecommitdiffstats
path: root/net/unix/af_unix.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-12-03 19:02:46 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-12-03 19:02:46 -0500
commit071f5d105a0ae93aeb02197c4ee3557e8cc57a21 (patch)
tree3d1cee6ce0235a2d36fffbae2b4f4ecd8121107d /net/unix/af_unix.c
parent2873d32ff493ecbfb7d2c7f56812ab941dda42f4 (diff)
parente3c9b1ef78eb111f925dd04992a3de1f383e8f49 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: "A lot of Thanksgiving turkey leftovers accumulated, here goes: 1) Fix bluetooth l2cap_chan object leak, from Johan Hedberg. 2) IDs for some new iwlwifi chips, from Oren Givon. 3) Fix rtlwifi lockups on boot, from Larry Finger. 4) Fix memory leak in fm10k, from Stephen Hemminger. 5) We have a route leak in the ipv6 tunnel infrastructure, fix from Paolo Abeni. 6) Fix buffer pointer handling in arm64 bpf JIT, from Zi Shen Lim. 7) Wrong lockdep annotations in tcp md5 support, fix from Eric Dumazet. 8) Work around some middle boxes which prevent proper handling of TCP Fast Open, from Yuchung Cheng. 9) TCP repair can do huge kmalloc() requests, build paged SKBs instead. From Eric Dumazet. 10) Fix msg_controllen overflow in scm_detach_fds, from Daniel Borkmann. 11) Fix device leaks on ipmr table destruction in ipv4 and ipv6, from Nikolay Aleksandrov. 12) Fix use after free in epoll with AF_UNIX sockets, from Rainer Weikusat. 13) Fix double free in VRF code, from Nikolay Aleksandrov. 14) Fix skb leaks on socket receive queue in tipc, from Ying Xue. 15) Fix ifup/ifdown crash in xgene driver, from Iyappan Subramanian. 16) Fix clearing of persistent array maps in bpf, from Daniel Borkmann. 17) In TCP, for the cross-SYN case, we don't initialize tp->copied_seq early enough. From Eric Dumazet. 18) Fix out of bounds accesses in bpf array implementation when updating elements, from Daniel Borkmann. 19) Fill gaps in RCU protection of np->opt in ipv6 stack, from Eric Dumazet. 20) When dumping proxy neigh entries, we have to accommodate NULL device pointers properly, from Konstantin Khlebnikov. 21) SCTP doesn't release all ipv6 socket resources properly, fix from Eric Dumazet. 22) Prevent underflows of sch->q.qlen for multiqueue packet schedulers, also from Eric Dumazet. 23) Fix MAC and unicast list handling in bnxt_en driver, from Jeffrey Huang and Michael Chan.
24) Don't actively scan radar channels, from Antonio Quartulli" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (110 commits) net: phy: reset only targeted phy bnxt_en: Setup uc_list mac filters after resetting the chip. bnxt_en: enforce proper storing of MAC address bnxt_en: Fixed incorrect implementation of ndo_set_mac_address net: lpc_eth: remove irq > NR_IRQS check from probe() net_sched: fix qdisc_tree_decrease_qlen() races openvswitch: fix hangup on vxlan/gre/geneve device deletion ipv4: igmp: Allow removing groups from a removed interface ipv6: sctp: implement sctp_v6_destroy_sock() arm64: bpf: add 'store immediate' instruction ipv6: kill sk_dst_lock ipv6: sctp: add rcu protection around np->opt net/neighbour: fix crash at dumping device-agnostic proxy entries sctp: use GFP_USER for user-controlled kmalloc sctp: convert sack_needed and sack_generation to bits ipv6: add complete rcu protection around np->opt bpf: fix allocation warnings in bpf maps and integer overflow mvebu: dts: enable IP checksum with jumbo frames for Armada 38x on Port0 net: mvneta: enable setting custom TX IP checksum limit net: mvneta: fix error path for building skb ...
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--net/unix/af_unix.c268
1 files changed, 231 insertions, 37 deletions
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 955ec152cb71..45aebd966978 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
326 return s; 326 return s;
327} 327}
328 328
329/* Support code for asymmetrically connected dgram sockets
330 *
331 * If a datagram socket is connected to a socket not itself connected
332 * to the first socket (eg, /dev/log), clients may only enqueue more
333 * messages if the present receive queue of the server socket is not
334 * "too large". This means there's a second writeability condition
335 * poll and sendmsg need to test. The dgram recv code will do a wake
336 * up on the peer_wait wait queue of a socket upon reception of a
337 * datagram which needs to be propagated to sleeping would-be writers
338 * since these might not have sent anything so far. This can't be
339 * accomplished via poll_wait because the lifetime of the server
340 * socket might be less than that of its clients if these break their
341 * association with it or if the server socket is closed while clients
342 * are still connected to it and there's no way to inform "a polling
343 * implementation" that it should let go of a certain wait queue
344 *
345 * In order to propagate a wake up, a wait_queue_t of the client
346 * socket is enqueued on the peer_wait queue of the server socket
347 * whose wake function does a wake_up on the ordinary client socket
348 * wait queue. This connection is established whenever a write (or
349 * poll for write) hit the flow control condition and broken when the
350 * association to the server socket is dissolved or after a wake up
351 * was relayed.
352 */
353
354static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
355 void *key)
356{
357 struct unix_sock *u;
358 wait_queue_head_t *u_sleep;
359
360 u = container_of(q, struct unix_sock, peer_wake);
361
362 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
363 q);
364 u->peer_wake.private = NULL;
365
366 /* relaying can only happen while the wq still exists */
367 u_sleep = sk_sleep(&u->sk);
368 if (u_sleep)
369 wake_up_interruptible_poll(u_sleep, key);
370
371 return 0;
372}
373
374static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
375{
376 struct unix_sock *u, *u_other;
377 int rc;
378
379 u = unix_sk(sk);
380 u_other = unix_sk(other);
381 rc = 0;
382 spin_lock(&u_other->peer_wait.lock);
383
384 if (!u->peer_wake.private) {
385 u->peer_wake.private = other;
386 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
387
388 rc = 1;
389 }
390
391 spin_unlock(&u_other->peer_wait.lock);
392 return rc;
393}
394
395static void unix_dgram_peer_wake_disconnect(struct sock *sk,
396 struct sock *other)
397{
398 struct unix_sock *u, *u_other;
399
400 u = unix_sk(sk);
401 u_other = unix_sk(other);
402 spin_lock(&u_other->peer_wait.lock);
403
404 if (u->peer_wake.private == other) {
405 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
406 u->peer_wake.private = NULL;
407 }
408
409 spin_unlock(&u_other->peer_wait.lock);
410}
411
412static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
413 struct sock *other)
414{
415 unix_dgram_peer_wake_disconnect(sk, other);
416 wake_up_interruptible_poll(sk_sleep(sk),
417 POLLOUT |
418 POLLWRNORM |
419 POLLWRBAND);
420}
421
422/* preconditions:
423 * - unix_peer(sk) == other
424 * - association is stable
425 */
426static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
427{
428 int connected;
429
430 connected = unix_dgram_peer_wake_connect(sk, other);
431
432 if (unix_recvq_full(other))
433 return 1;
434
435 if (connected)
436 unix_dgram_peer_wake_disconnect(sk, other);
437
438 return 0;
439}
440
329static int unix_writable(const struct sock *sk) 441static int unix_writable(const struct sock *sk)
330{ 442{
331 return sk->sk_state != TCP_LISTEN && 443 return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
431 skpair->sk_state_change(skpair); 543 skpair->sk_state_change(skpair);
432 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); 544 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
433 } 545 }
546
547 unix_dgram_peer_wake_disconnect(sk, skpair);
434 sock_put(skpair); /* It may now die */ 548 sock_put(skpair); /* It may now die */
435 unix_peer(sk) = NULL; 549 unix_peer(sk) = NULL;
436 } 550 }
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
666 INIT_LIST_HEAD(&u->link); 780 INIT_LIST_HEAD(&u->link);
667 mutex_init(&u->readlock); /* single task reading lock */ 781 mutex_init(&u->readlock); /* single task reading lock */
668 init_waitqueue_head(&u->peer_wait); 782 init_waitqueue_head(&u->peer_wait);
783 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
669 unix_insert_socket(unix_sockets_unbound(sk), sk); 784 unix_insert_socket(unix_sockets_unbound(sk), sk);
670out: 785out:
671 if (sk == NULL) 786 if (sk == NULL)
@@ -1033,6 +1148,8 @@ restart:
1033 if (unix_peer(sk)) { 1148 if (unix_peer(sk)) {
1034 struct sock *old_peer = unix_peer(sk); 1149 struct sock *old_peer = unix_peer(sk);
1035 unix_peer(sk) = other; 1150 unix_peer(sk) = other;
1151 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1152
1036 unix_state_double_unlock(sk, other); 1153 unix_state_double_unlock(sk, other);
1037 1154
1038 if (other != old_peer) 1155 if (other != old_peer)
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
1434 return err; 1551 return err;
1435} 1552}
1436 1553
1554static bool unix_passcred_enabled(const struct socket *sock,
1555 const struct sock *other)
1556{
1557 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1558 !other->sk_socket ||
1559 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1560}
1561
1437/* 1562/*
1438 * Some apps rely on write() giving SCM_CREDENTIALS 1563 * Some apps rely on write() giving SCM_CREDENTIALS
1439 * We include credentials if source or destination socket 1564 * We include credentials if source or destination socket
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1444{ 1569{
1445 if (UNIXCB(skb).pid) 1570 if (UNIXCB(skb).pid)
1446 return; 1571 return;
1447 if (test_bit(SOCK_PASSCRED, &sock->flags) || 1572 if (unix_passcred_enabled(sock, other)) {
1448 !other->sk_socket ||
1449 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1450 UNIXCB(skb).pid = get_pid(task_tgid(current)); 1573 UNIXCB(skb).pid = get_pid(task_tgid(current));
1451 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); 1574 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1452 } 1575 }
1453} 1576}
1454 1577
1578static int maybe_init_creds(struct scm_cookie *scm,
1579 struct socket *socket,
1580 const struct sock *other)
1581{
1582 int err;
1583 struct msghdr msg = { .msg_controllen = 0 };
1584
1585 err = scm_send(socket, &msg, scm, false);
1586 if (err)
1587 return err;
1588
1589 if (unix_passcred_enabled(socket, other)) {
1590 scm->pid = get_pid(task_tgid(current));
1591 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1592 }
1593 return err;
1594}
1595
1596static bool unix_skb_scm_eq(struct sk_buff *skb,
1597 struct scm_cookie *scm)
1598{
1599 const struct unix_skb_parms *u = &UNIXCB(skb);
1600
1601 return u->pid == scm->pid &&
1602 uid_eq(u->uid, scm->creds.uid) &&
1603 gid_eq(u->gid, scm->creds.gid) &&
1604 unix_secdata_eq(scm, skb);
1605}
1606
1455/* 1607/*
1456 * Send AF_UNIX data. 1608 * Send AF_UNIX data.
1457 */ 1609 */
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1472 struct scm_cookie scm; 1624 struct scm_cookie scm;
1473 int max_level; 1625 int max_level;
1474 int data_len = 0; 1626 int data_len = 0;
1627 int sk_locked;
1475 1628
1476 wait_for_unix_gc(); 1629 wait_for_unix_gc();
1477 err = scm_send(sock, msg, &scm, false); 1630 err = scm_send(sock, msg, &scm, false);
@@ -1550,12 +1703,14 @@ restart:
1550 goto out_free; 1703 goto out_free;
1551 } 1704 }
1552 1705
1706 sk_locked = 0;
1553 unix_state_lock(other); 1707 unix_state_lock(other);
1708restart_locked:
1554 err = -EPERM; 1709 err = -EPERM;
1555 if (!unix_may_send(sk, other)) 1710 if (!unix_may_send(sk, other))
1556 goto out_unlock; 1711 goto out_unlock;
1557 1712
1558 if (sock_flag(other, SOCK_DEAD)) { 1713 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1559 /* 1714 /*
1560 * Check with 1003.1g - what should 1715 * Check with 1003.1g - what should
1561 * datagram error 1716 * datagram error
@@ -1563,10 +1718,14 @@ restart:
1563 unix_state_unlock(other); 1718 unix_state_unlock(other);
1564 sock_put(other); 1719 sock_put(other);
1565 1720
1721 if (!sk_locked)
1722 unix_state_lock(sk);
1723
1566 err = 0; 1724 err = 0;
1567 unix_state_lock(sk);
1568 if (unix_peer(sk) == other) { 1725 if (unix_peer(sk) == other) {
1569 unix_peer(sk) = NULL; 1726 unix_peer(sk) = NULL;
1727 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1728
1570 unix_state_unlock(sk); 1729 unix_state_unlock(sk);
1571 1730
1572 unix_dgram_disconnected(sk, other); 1731 unix_dgram_disconnected(sk, other);
@@ -1592,21 +1751,38 @@ restart:
1592 goto out_unlock; 1751 goto out_unlock;
1593 } 1752 }
1594 1753
1595 if (unix_peer(other) != sk && unix_recvq_full(other)) { 1754 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1596 if (!timeo) { 1755 if (timeo) {
1597 err = -EAGAIN; 1756 timeo = unix_wait_for_peer(other, timeo);
1598 goto out_unlock; 1757
1758 err = sock_intr_errno(timeo);
1759 if (signal_pending(current))
1760 goto out_free;
1761
1762 goto restart;
1599 } 1763 }
1600 1764
1601 timeo = unix_wait_for_peer(other, timeo); 1765 if (!sk_locked) {
1766 unix_state_unlock(other);
1767 unix_state_double_lock(sk, other);
1768 }
1602 1769
1603 err = sock_intr_errno(timeo); 1770 if (unix_peer(sk) != other ||
1604 if (signal_pending(current)) 1771 unix_dgram_peer_wake_me(sk, other)) {
1605 goto out_free; 1772 err = -EAGAIN;
1773 sk_locked = 1;
1774 goto out_unlock;
1775 }
1606 1776
1607 goto restart; 1777 if (!sk_locked) {
1778 sk_locked = 1;
1779 goto restart_locked;
1780 }
1608 } 1781 }
1609 1782
1783 if (unlikely(sk_locked))
1784 unix_state_unlock(sk);
1785
1610 if (sock_flag(other, SOCK_RCVTSTAMP)) 1786 if (sock_flag(other, SOCK_RCVTSTAMP))
1611 __net_timestamp(skb); 1787 __net_timestamp(skb);
1612 maybe_add_creds(skb, sock, other); 1788 maybe_add_creds(skb, sock, other);
@@ -1620,6 +1796,8 @@ restart:
1620 return len; 1796 return len;
1621 1797
1622out_unlock: 1798out_unlock:
1799 if (sk_locked)
1800 unix_state_unlock(sk);
1623 unix_state_unlock(other); 1801 unix_state_unlock(other);
1624out_free: 1802out_free:
1625 kfree_skb(skb); 1803 kfree_skb(skb);
@@ -1741,8 +1919,10 @@ out_err:
1741static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, 1919static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1742 int offset, size_t size, int flags) 1920 int offset, size_t size, int flags)
1743{ 1921{
1744 int err = 0; 1922 int err;
1745 bool send_sigpipe = true; 1923 bool send_sigpipe = false;
1924 bool init_scm = true;
1925 struct scm_cookie scm;
1746 struct sock *other, *sk = socket->sk; 1926 struct sock *other, *sk = socket->sk;
1747 struct sk_buff *skb, *newskb = NULL, *tail = NULL; 1927 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1748 1928
@@ -1760,7 +1940,7 @@ alloc_skb:
1760 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, 1940 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1761 &err, 0); 1941 &err, 0);
1762 if (!newskb) 1942 if (!newskb)
1763 return err; 1943 goto err;
1764 } 1944 }
1765 1945
1766 /* we must acquire readlock as we modify already present 1946 /* we must acquire readlock as we modify already present
@@ -1769,12 +1949,12 @@ alloc_skb:
1769 err = mutex_lock_interruptible(&unix_sk(other)->readlock); 1949 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1770 if (err) { 1950 if (err) {
1771 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; 1951 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1772 send_sigpipe = false;
1773 goto err; 1952 goto err;
1774 } 1953 }
1775 1954
1776 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1955 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1777 err = -EPIPE; 1956 err = -EPIPE;
1957 send_sigpipe = true;
1778 goto err_unlock; 1958 goto err_unlock;
1779 } 1959 }
1780 1960
@@ -1783,17 +1963,27 @@ alloc_skb:
1783 if (sock_flag(other, SOCK_DEAD) || 1963 if (sock_flag(other, SOCK_DEAD) ||
1784 other->sk_shutdown & RCV_SHUTDOWN) { 1964 other->sk_shutdown & RCV_SHUTDOWN) {
1785 err = -EPIPE; 1965 err = -EPIPE;
1966 send_sigpipe = true;
1786 goto err_state_unlock; 1967 goto err_state_unlock;
1787 } 1968 }
1788 1969
1970 if (init_scm) {
1971 err = maybe_init_creds(&scm, socket, other);
1972 if (err)
1973 goto err_state_unlock;
1974 init_scm = false;
1975 }
1976
1789 skb = skb_peek_tail(&other->sk_receive_queue); 1977 skb = skb_peek_tail(&other->sk_receive_queue);
1790 if (tail && tail == skb) { 1978 if (tail && tail == skb) {
1791 skb = newskb; 1979 skb = newskb;
1792 } else if (!skb) { 1980 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1793 if (newskb) 1981 if (newskb) {
1794 skb = newskb; 1982 skb = newskb;
1795 else 1983 } else {
1984 tail = skb;
1796 goto alloc_skb; 1985 goto alloc_skb;
1986 }
1797 } else if (newskb) { 1987 } else if (newskb) {
1798 /* this is fast path, we don't necessarily need to 1988 /* this is fast path, we don't necessarily need to
1799 * call to kfree_skb even though with newskb == NULL 1989 * call to kfree_skb even though with newskb == NULL
@@ -1814,6 +2004,9 @@ alloc_skb:
1814 atomic_add(size, &sk->sk_wmem_alloc); 2004 atomic_add(size, &sk->sk_wmem_alloc);
1815 2005
1816 if (newskb) { 2006 if (newskb) {
2007 err = unix_scm_to_skb(&scm, skb, false);
2008 if (err)
2009 goto err_state_unlock;
1817 spin_lock(&other->sk_receive_queue.lock); 2010 spin_lock(&other->sk_receive_queue.lock);
1818 __skb_queue_tail(&other->sk_receive_queue, newskb); 2011 __skb_queue_tail(&other->sk_receive_queue, newskb);
1819 spin_unlock(&other->sk_receive_queue.lock); 2012 spin_unlock(&other->sk_receive_queue.lock);
@@ -1823,7 +2016,7 @@ alloc_skb:
1823 mutex_unlock(&unix_sk(other)->readlock); 2016 mutex_unlock(&unix_sk(other)->readlock);
1824 2017
1825 other->sk_data_ready(other); 2018 other->sk_data_ready(other);
1826 2019 scm_destroy(&scm);
1827 return size; 2020 return size;
1828 2021
1829err_state_unlock: 2022err_state_unlock:
@@ -1834,6 +2027,8 @@ err:
1834 kfree_skb(newskb); 2027 kfree_skb(newskb);
1835 if (send_sigpipe && !(flags & MSG_NOSIGNAL)) 2028 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1836 send_sig(SIGPIPE, current, 0); 2029 send_sig(SIGPIPE, current, 0);
2030 if (!init_scm)
2031 scm_destroy(&scm);
1837 return err; 2032 return err;
1838} 2033}
1839 2034
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
1996 !timeo) 2191 !timeo)
1997 break; 2192 break;
1998 2193
1999 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2194 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2000 unix_state_unlock(sk); 2195 unix_state_unlock(sk);
2001 timeo = freezable_schedule_timeout(timeo); 2196 timeo = freezable_schedule_timeout(timeo);
2002 unix_state_lock(sk); 2197 unix_state_lock(sk);
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
2004 if (sock_flag(sk, SOCK_DEAD)) 2199 if (sock_flag(sk, SOCK_DEAD))
2005 break; 2200 break;
2006 2201
2007 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2202 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2008 } 2203 }
2009 2204
2010 finish_wait(sk_sleep(sk), &wait); 2205 finish_wait(sk_sleep(sk), &wait);
@@ -2137,10 +2332,7 @@ unlock:
2137 2332
2138 if (check_creds) { 2333 if (check_creds) {
2139 /* Never glue messages from different writers */ 2334 /* Never glue messages from different writers */
2140 if ((UNIXCB(skb).pid != scm.pid) || 2335 if (!unix_skb_scm_eq(skb, &scm))
2141 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2142 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2143 !unix_secdata_eq(&scm, skb))
2144 break; 2336 break;
2145 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { 2337 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2146 /* Copy credentials */ 2338 /* Copy credentials */
@@ -2476,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2476 return mask; 2668 return mask;
2477 2669
2478 writable = unix_writable(sk); 2670 writable = unix_writable(sk);
2479 other = unix_peer_get(sk); 2671 if (writable) {
2480 if (other) { 2672 unix_state_lock(sk);
2481 if (unix_peer(other) != sk) { 2673
2482 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); 2674 other = unix_peer(sk);
2483 if (unix_recvq_full(other)) 2675 if (other && unix_peer(other) != sk &&
2484 writable = 0; 2676 unix_recvq_full(other) &&
2485 } 2677 unix_dgram_peer_wake_me(sk, other))
2486 sock_put(other); 2678 writable = 0;
2679
2680 unix_state_unlock(sk);
2487 } 2681 }
2488 2682
2489 if (writable) 2683 if (writable)
2490 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2684 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2491 else 2685 else
2492 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 2686 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2493 2687
2494 return mask; 2688 return mask;
2495} 2689}