author     Thomas Gleixner <tglx@linutronix.de>   2016-01-12 05:01:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>   2016-01-12 05:01:12 -0500
commit     1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree       44db563f64cf5f8d62af8f99a61e2b248c44ea3a /net/unix/af_unix.c
parent     03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent     f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer leading to a system lock-up when dealing with a
   too-small delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
   (Daniel Lezcano)

 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--   net/unix/af_unix.c   281
1 file changed, 234 insertions, 47 deletions
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 955ec152cb71..a4631477cedf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
         return s;
 }
 
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (eg, /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hit the flow control condition and broken when the
+ * association to the server socket is dissolved or after a wake up
+ * was relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+                                      void *key)
+{
+        struct unix_sock *u;
+        wait_queue_head_t *u_sleep;
+
+        u = container_of(q, struct unix_sock, peer_wake);
+
+        __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+                            q);
+        u->peer_wake.private = NULL;
+
+        /* relaying can only happen while the wq still exists */
+        u_sleep = sk_sleep(&u->sk);
+        if (u_sleep)
+                wake_up_interruptible_poll(u_sleep, key);
+
+        return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+        struct unix_sock *u, *u_other;
+        int rc;
+
+        u = unix_sk(sk);
+        u_other = unix_sk(other);
+        rc = 0;
+        spin_lock(&u_other->peer_wait.lock);
+
+        if (!u->peer_wake.private) {
+                u->peer_wake.private = other;
+                __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+                rc = 1;
+        }
+
+        spin_unlock(&u_other->peer_wait.lock);
+        return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+                                            struct sock *other)
+{
+        struct unix_sock *u, *u_other;
+
+        u = unix_sk(sk);
+        u_other = unix_sk(other);
+        spin_lock(&u_other->peer_wait.lock);
+
+        if (u->peer_wake.private == other) {
+                __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+                u->peer_wake.private = NULL;
+        }
+
+        spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+                                                   struct sock *other)
+{
+        unix_dgram_peer_wake_disconnect(sk, other);
+        wake_up_interruptible_poll(sk_sleep(sk),
+                                   POLLOUT |
+                                   POLLWRNORM |
+                                   POLLWRBAND);
+}
+
+/* preconditions:
+ *      - unix_peer(sk) == other
+ *      - association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+        int connected;
+
+        connected = unix_dgram_peer_wake_connect(sk, other);
+
+        if (unix_recvq_full(other))
+                return 1;
+
+        if (connected)
+                unix_dgram_peer_wake_disconnect(sk, other);
+
+        return 0;
+}
+
 static int unix_writable(const struct sock *sk)
 {
         return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
                         skpair->sk_state_change(skpair);
                         sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                 }
+
+                unix_dgram_peer_wake_disconnect(sk, skpair);
                 sock_put(skpair); /* It may now die */
                 unix_peer(sk) = NULL;
         }
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
         INIT_LIST_HEAD(&u->link);
         mutex_init(&u->readlock); /* single task reading lock */
         init_waitqueue_head(&u->peer_wait);
+        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
         unix_insert_socket(unix_sockets_unbound(sk), sk);
 out:
         if (sk == NULL)
@@ -1033,6 +1148,8 @@ restart:
         if (unix_peer(sk)) {
                 struct sock *old_peer = unix_peer(sk);
                 unix_peer(sk) = other;
+                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
                 unix_state_double_unlock(sk, other);
 
                 if (other != old_peer)
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
         return err;
 }
 
+static bool unix_passcred_enabled(const struct socket *sock,
+                                  const struct sock *other)
+{
+        return test_bit(SOCK_PASSCRED, &sock->flags) ||
+               !other->sk_socket ||
+               test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
+}
+
 /*
  * Some apps rely on write() giving SCM_CREDENTIALS
  * We include credentials if source or destination socket
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 {
         if (UNIXCB(skb).pid)
                 return;
-        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-            !other->sk_socket ||
-            test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+        if (unix_passcred_enabled(sock, other)) {
                 UNIXCB(skb).pid = get_pid(task_tgid(current));
                 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
         }
 }
 
+static int maybe_init_creds(struct scm_cookie *scm,
+                            struct socket *socket,
+                            const struct sock *other)
+{
+        int err;
+        struct msghdr msg = { .msg_controllen = 0 };
+
+        err = scm_send(socket, &msg, scm, false);
+        if (err)
+                return err;
+
+        if (unix_passcred_enabled(socket, other)) {
+                scm->pid = get_pid(task_tgid(current));
+                current_uid_gid(&scm->creds.uid, &scm->creds.gid);
+        }
+        return err;
+}
+
+static bool unix_skb_scm_eq(struct sk_buff *skb,
+                            struct scm_cookie *scm)
+{
+        const struct unix_skb_parms *u = &UNIXCB(skb);
+
+        return u->pid == scm->pid &&
+               uid_eq(u->uid, scm->creds.uid) &&
+               gid_eq(u->gid, scm->creds.gid) &&
+               unix_secdata_eq(scm, skb);
+}
+
 /*
  * Send AF_UNIX data.
  */
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
         struct scm_cookie scm;
         int max_level;
         int data_len = 0;
+        int sk_locked;
 
         wait_for_unix_gc();
         err = scm_send(sock, msg, &scm, false);
@@ -1550,12 +1703,14 @@ restart:
                 goto out_free;
         }
 
+        sk_locked = 0;
         unix_state_lock(other);
+restart_locked:
         err = -EPERM;
         if (!unix_may_send(sk, other))
                 goto out_unlock;
 
-        if (sock_flag(other, SOCK_DEAD)) {
+        if (unlikely(sock_flag(other, SOCK_DEAD))) {
                 /*
                  * Check with 1003.1g - what should
                  * datagram error
@@ -1563,10 +1718,14 @@ restart:
                 unix_state_unlock(other);
                 sock_put(other);
 
+                if (!sk_locked)
+                        unix_state_lock(sk);
+
                 err = 0;
-                unix_state_lock(sk);
                 if (unix_peer(sk) == other) {
                         unix_peer(sk) = NULL;
+                        unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
                         unix_state_unlock(sk);
 
                         unix_dgram_disconnected(sk, other);
@@ -1592,21 +1751,38 @@ restart:
                         goto out_unlock;
         }
 
-        if (unix_peer(other) != sk && unix_recvq_full(other)) {
-                if (!timeo) {
-                        err = -EAGAIN;
-                        goto out_unlock;
+        if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+                if (timeo) {
+                        timeo = unix_wait_for_peer(other, timeo);
+
+                        err = sock_intr_errno(timeo);
+                        if (signal_pending(current))
+                                goto out_free;
+
+                        goto restart;
                 }
 
-                timeo = unix_wait_for_peer(other, timeo);
+                if (!sk_locked) {
+                        unix_state_unlock(other);
+                        unix_state_double_lock(sk, other);
+                }
 
-                err = sock_intr_errno(timeo);
-                if (signal_pending(current))
-                        goto out_free;
+                if (unix_peer(sk) != other ||
+                    unix_dgram_peer_wake_me(sk, other)) {
+                        err = -EAGAIN;
+                        sk_locked = 1;
+                        goto out_unlock;
+                }
 
-                goto restart;
+                if (!sk_locked) {
+                        sk_locked = 1;
+                        goto restart_locked;
+                }
         }
 
+        if (unlikely(sk_locked))
+                unix_state_unlock(sk);
+
         if (sock_flag(other, SOCK_RCVTSTAMP))
                 __net_timestamp(skb);
         maybe_add_creds(skb, sock, other);
@@ -1620,6 +1796,8 @@ restart:
         return len;
 
 out_unlock:
+        if (sk_locked)
+                unix_state_unlock(sk);
         unix_state_unlock(other);
 out_free:
         kfree_skb(skb);
@@ -1741,8 +1919,10 @@ out_err:
 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
                                     int offset, size_t size, int flags)
 {
-        int err = 0;
-        bool send_sigpipe = true;
+        int err;
+        bool send_sigpipe = false;
+        bool init_scm = true;
+        struct scm_cookie scm;
         struct sock *other, *sk = socket->sk;
         struct sk_buff *skb, *newskb = NULL, *tail = NULL;
 
@@ -1760,7 +1940,7 @@ alloc_skb:
                 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                               &err, 0);
                 if (!newskb)
-                        return err;
+                        goto err;
         }
 
         /* we must acquire readlock as we modify already present
@@ -1769,12 +1949,12 @@ alloc_skb:
         err = mutex_lock_interruptible(&unix_sk(other)->readlock);
         if (err) {
                 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
-                send_sigpipe = false;
                 goto err;
         }
 
         if (sk->sk_shutdown & SEND_SHUTDOWN) {
                 err = -EPIPE;
+                send_sigpipe = true;
                 goto err_unlock;
         }
 
@@ -1783,17 +1963,27 @@ alloc_skb:
         if (sock_flag(other, SOCK_DEAD) ||
             other->sk_shutdown & RCV_SHUTDOWN) {
                 err = -EPIPE;
+                send_sigpipe = true;
                 goto err_state_unlock;
         }
 
+        if (init_scm) {
+                err = maybe_init_creds(&scm, socket, other);
+                if (err)
+                        goto err_state_unlock;
+                init_scm = false;
+        }
+
         skb = skb_peek_tail(&other->sk_receive_queue);
         if (tail && tail == skb) {
                 skb = newskb;
-        } else if (!skb) {
-                if (newskb)
+        } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
+                if (newskb) {
                         skb = newskb;
-                else
+                } else {
+                        tail = skb;
                         goto alloc_skb;
+                }
         } else if (newskb) {
                 /* this is fast path, we don't necessarily need to
                  * call to kfree_skb even though with newskb == NULL
@@ -1814,6 +2004,9 @@ alloc_skb:
         atomic_add(size, &sk->sk_wmem_alloc);
 
         if (newskb) {
+                err = unix_scm_to_skb(&scm, skb, false);
+                if (err)
+                        goto err_state_unlock;
                 spin_lock(&other->sk_receive_queue.lock);
                 __skb_queue_tail(&other->sk_receive_queue, newskb);
                 spin_unlock(&other->sk_receive_queue.lock);
@@ -1823,7 +2016,7 @@ alloc_skb:
         mutex_unlock(&unix_sk(other)->readlock);
 
         other->sk_data_ready(other);
-
+        scm_destroy(&scm);
         return size;
 
 err_state_unlock:
@@ -1834,6 +2027,8 @@ err:
         kfree_skb(newskb);
         if (send_sigpipe && !(flags & MSG_NOSIGNAL))
                 send_sig(SIGPIPE, current, 0);
+        if (!init_scm)
+                scm_destroy(&scm);
         return err;
 }
 
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                     !timeo)
                         break;
 
-                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                 unix_state_unlock(sk);
                 timeo = freezable_schedule_timeout(timeo);
                 unix_state_lock(sk);
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                 if (sock_flag(sk, SOCK_DEAD))
                         break;
 
-                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
         }
 
         finish_wait(sk_sleep(sk), &wait);
@@ -2061,14 +2256,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
         /* Lock the socket to prevent queue disordering
          * while sleeps in memcpy_tomsg
          */
-        err = mutex_lock_interruptible(&u->readlock);
-        if (unlikely(err)) {
-                /* recvmsg() in non blocking mode is supposed to return -EAGAIN
-                 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
-                 */
-                err = noblock ? -EAGAIN : -ERESTARTSYS;
-                goto out;
-        }
+        mutex_lock(&u->readlock);
 
         if (flags & MSG_PEEK)
                 skip = sk_peek_offset(sk, flags);
@@ -2112,12 +2300,12 @@ again:
                         timeo = unix_stream_data_wait(sk, timeo, last,
                                                       last_len);
 
-                        if (signal_pending(current) ||
-                            mutex_lock_interruptible(&u->readlock)) {
+                        if (signal_pending(current)) {
                                 err = sock_intr_errno(timeo);
                                 goto out;
                         }
 
+                        mutex_lock(&u->readlock);
                         continue;
 unlock:
                         unix_state_unlock(sk);
@@ -2137,10 +2325,7 @@ unlock:
 
                 if (check_creds) {
                         /* Never glue messages from different writers */
-                        if ((UNIXCB(skb).pid != scm.pid) ||
-                            !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
-                            !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
-                            !unix_secdata_eq(&scm, skb))
+                        if (!unix_skb_scm_eq(skb, &scm))
                                 break;
                 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                         /* Copy credentials */
@@ -2476,20 +2661,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
                 return mask;
 
         writable = unix_writable(sk);
-        other = unix_peer_get(sk);
-        if (other) {
-                if (unix_peer(other) != sk) {
-                        sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
-                        if (unix_recvq_full(other))
-                                writable = 0;
-                }
-                sock_put(other);
+        if (writable) {
+                unix_state_lock(sk);
+
+                other = unix_peer(sk);
+                if (other && unix_peer(other) != sk &&
+                    unix_recvq_full(other) &&
+                    unix_dgram_peer_wake_me(sk, other))
+                        writable = 0;
+
+                unix_state_unlock(sk);
         }
 
         if (writable)
                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
         else
-                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
         return mask;
 }
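
The following is a rough userspace sketch of the asymmetric-dgram situation the
peer_wait relay above is built for; it is not part of the commit, and the socket
path, message size, and bare-bones error handling are made up for illustration.
A client connects to a bound datagram socket that is not connected back to it,
fills the server's receive queue until the flow-control condition hits, then
polls for writability; draining a datagram on the server side is the event that
must wake the poll()er, which is the wake-up unix_dgram_peer_wake_relay()
propagates to the client's own wait queue.

#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_un addr = { .sun_family = AF_UNIX };
        struct pollfd pfd;
        char buf[64] = "x";
        int srv, cli;

        strcpy(addr.sun_path, "/tmp/peer-wake-demo.sock"); /* made-up path */
        unlink(addr.sun_path);

        srv = socket(AF_UNIX, SOCK_DGRAM, 0);
        bind(srv, (struct sockaddr *)&addr, sizeof(addr));

        /* The client connects to srv, but srv is not connected back, so
         * the client's writability depends on srv's receive queue. */
        cli = socket(AF_UNIX, SOCK_DGRAM, 0);
        connect(cli, (struct sockaddr *)&addr, sizeof(addr));

        /* Fill srv's receive queue until the flow-control condition
         * (unix_recvq_full() on the kernel side) makes sends fail. */
        while (send(cli, buf, sizeof(buf), MSG_DONTWAIT) > 0)
                ;

        pfd.fd = cli;
        pfd.events = POLLOUT;

        /* Queue full: expect 0 (not writable within the 0ms timeout). */
        printf("writable before drain: %d\n", poll(&pfd, 1, 0));

        /* Drain one datagram; the relayed wake-up is what lets a
         * sleeping poll() on cli notice the freed slot. */
        recv(srv, buf, sizeof(buf), 0);
        printf("writable after drain:  %d\n", poll(&pfd, 1, 1000));

        close(cli);
        close(srv);
        unlink(addr.sun_path);
        return 0;
}

Before this change, unix_dgram_poll() registered the polling client directly on
the server's peer_wait queue via sock_poll_wait(); the patched version instead
takes the client's own state lock and, only when the queue is full, links the
two wait queues through unix_dgram_peer_wake_me(), so the relay entry is torn
down when the association is dissolved rather than left dangling if the server
socket goes away first.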