path: root/net/unix/af_unix.c
Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--  net/unix/af_unix.c  169
1 file changed, 103 insertions(+), 66 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f2551190311..3c95304a081 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -117,7 +117,7 @@
 
 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 static DEFINE_SPINLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
 
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 /*
  * SMP locking strategy:
  *    hash table is protected with spinlock unix_table_lock
- *    each socket state is protected by separate rwlock.
+ *    each socket state is protected by separate spin lock.
  */
 
 static inline unsigned unix_hash_fold(__wsum n)
@@ -282,7 +282,7 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
 	return s;
 }
 
-static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
+static struct sock *unix_find_socket_byinode(struct inode *i)
 {
 	struct sock *s;
 	struct hlist_node *node;
@@ -292,9 +292,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->dentry;
 
-		if (!net_eq(sock_net(s), net))
-			continue;
-
 		if (dentry && dentry->d_inode == i) {
 			sock_hold(s);
 			goto found;
@@ -313,13 +310,16 @@ static inline int unix_writable(struct sock *sk)
 
 static void unix_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	struct socket_wq *wq;
+
+	rcu_read_lock();
 	if (unix_writable(sk)) {
-		if (sk_has_sleeper(sk))
-			wake_up_interruptible_sync(sk->sk_sleep);
+		wq = rcu_dereference(sk->sk_wq);
+		if (wq_has_sleeper(wq))
+			wake_up_interruptible_sync(&wq->wait);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
-	read_unlock(&sk->sk_callback_lock);
+	rcu_read_unlock();
 }
 
 /* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk)
 	if (u->addr)
 		unix_release_addr(u->addr);
 
-	atomic_dec(&unix_nr_socks);
+	atomic_long_dec(&unix_nr_socks);
 	local_bh_disable();
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 #ifdef UNIX_REFCNT_DEBUG
-	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
-		atomic_read(&unix_nr_socks));
+	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+		atomic_long_read(&unix_nr_socks));
 #endif
 }
 
@@ -406,9 +406,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
 			skpair->sk_err = ECONNRESET;
 			unix_state_unlock(skpair);
 			skpair->sk_state_change(skpair);
-			read_lock(&skpair->sk_callback_lock);
 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
-			read_unlock(&skpair->sk_callback_lock);
 		}
 		sock_put(skpair); /* It may now die */
 		unix_peer(sk) = NULL;
@@ -449,11 +447,31 @@ static int unix_release_sock(struct sock *sk, int embrion)
 	return 0;
 }
 
+static void init_peercred(struct sock *sk)
+{
+	put_pid(sk->sk_peer_pid);
+	if (sk->sk_peer_cred)
+		put_cred(sk->sk_peer_cred);
+	sk->sk_peer_pid = get_pid(task_tgid(current));
+	sk->sk_peer_cred = get_current_cred();
+}
+
+static void copy_peercred(struct sock *sk, struct sock *peersk)
+{
+	put_pid(sk->sk_peer_pid);
+	if (sk->sk_peer_cred)
+		put_cred(sk->sk_peer_cred);
+	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+}
+
 static int unix_listen(struct socket *sock, int backlog)
 {
 	int err;
 	struct sock *sk = sock->sk;
 	struct unix_sock *u = unix_sk(sk);
+	struct pid *old_pid = NULL;
+	const struct cred *old_cred = NULL;
 
 	err = -EOPNOTSUPP;
 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -469,12 +487,14 @@ static int unix_listen(struct socket *sock, int backlog)
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_state = TCP_LISTEN;
 	/* set credentials so connect can copy them */
-	sk->sk_peercred.pid = task_tgid_vnr(current);
-	current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
+	init_peercred(sk);
 	err = 0;
 
out_unlock:
 	unix_state_unlock(sk);
+	put_pid(old_pid);
+	if (old_cred)
+		put_cred(old_cred);
out:
 	return err;
 }
@@ -586,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
 	struct sock *sk = NULL;
 	struct unix_sock *u;
 
-	atomic_inc(&unix_nr_socks);
-	if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
+	atomic_long_inc(&unix_nr_socks);
+	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
 		goto out;
 
 	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
@@ -612,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
 	unix_insert_socket(unix_sockets_unbound, sk);
out:
 	if (sk == NULL)
-		atomic_dec(&unix_nr_socks);
+		atomic_long_dec(&unix_nr_socks);
 	else {
 		local_bh_disable();
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -672,6 +692,7 @@ static int unix_autobind(struct socket *sock)
 	static u32 ordernum = 1;
 	struct unix_address *addr;
 	int err;
+	unsigned int retries = 0;
 
 	mutex_lock(&u->readlock);
 
@@ -697,9 +718,17 @@ retry:
 	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
 				      addr->hash)) {
 		spin_unlock(&unix_table_lock);
-		/* Sanity yield. It is unusual case, but yet... */
-		if (!(ordernum&0xFF))
-			yield();
+		/*
+		 * __unix_find_socket_byname() may take long time if many names
+		 * are already in use.
+		 */
+		cond_resched();
+		/* Give up if all names seems to be in use. */
+		if (retries++ == 0xFFFFF) {
+			err = -ENOSPC;
+			kfree(addr);
+			goto out;
+		}
 		goto retry;
 	}
 	addr->hash ^= sk->sk_type;
@@ -735,7 +764,7 @@ static struct sock *unix_find_other(struct net *net,
 		err = -ECONNREFUSED;
 		if (!S_ISSOCK(inode->i_mode))
 			goto put_fail;
-		u = unix_find_socket_byinode(net, inode);
+		u = unix_find_socket_byinode(inode);
 		if (!u)
 			goto put_fail;
 
@@ -1139,10 +1168,9 @@ restart:
 	unix_peer(newsk) = sk;
 	newsk->sk_state = TCP_ESTABLISHED;
 	newsk->sk_type = sk->sk_type;
-	newsk->sk_peercred.pid = task_tgid_vnr(current);
-	current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
+	init_peercred(newsk);
 	newu = unix_sk(newsk);
-	newsk->sk_sleep = &newu->peer_wait;
+	newsk->sk_wq = &newu->peer_wq;
 	otheru = unix_sk(other);
 
 	/* copy address information from listening to new sock*/
@@ -1156,7 +1184,7 @@ restart:
 	}
 
 	/* Set credentials */
-	sk->sk_peercred = other->sk_peercred;
+	copy_peercred(sk, other);
 
 	sock->state = SS_CONNECTED;
 	sk->sk_state = TCP_ESTABLISHED;
@@ -1198,10 +1226,8 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
 	sock_hold(skb);
 	unix_peer(ska) = skb;
 	unix_peer(skb) = ska;
-	ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
-	current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
-	ska->sk_peercred.uid = skb->sk_peercred.uid;
-	ska->sk_peercred.gid = skb->sk_peercred.gid;
+	init_peercred(ska);
+	init_peercred(skb);
 
 	if (ska->sk_type != SOCK_DGRAM) {
 		ska->sk_state = TCP_ESTABLISHED;
@@ -1296,18 +1322,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 	int i;
 
 	scm->fp = UNIXCB(skb).fp;
-	skb->destructor = sock_wfree;
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
 		unix_notinflight(scm->fp->fp[i]);
 }
 
-static void unix_destruct_fds(struct sk_buff *skb)
+static void unix_destruct_scm(struct sk_buff *skb)
 {
 	struct scm_cookie scm;
 	memset(&scm, 0, sizeof(scm));
-	unix_detach_fds(&scm, skb);
+	scm.pid = UNIXCB(skb).pid;
+	scm.cred = UNIXCB(skb).cred;
+	if (UNIXCB(skb).fp)
+		unix_detach_fds(&scm, skb);
 
 	/* Alas, it calls VFS */
 	/* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1330,10 +1358,22 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 
 	for (i = scm->fp->count-1; i >= 0; i--)
 		unix_inflight(scm->fp->fp[i]);
-	skb->destructor = unix_destruct_fds;
 	return 0;
 }
 
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+{
+	int err = 0;
+	UNIXCB(skb).pid = get_pid(scm->pid);
+	UNIXCB(skb).cred = get_cred(scm->cred);
+	UNIXCB(skb).fp = NULL;
+	if (scm->fp && send_fds)
+		err = unix_attach_fds(scm, skb);
+
+	skb->destructor = unix_destruct_scm;
+	return err;
+}
+
 /*
  * Send AF_UNIX data.
  */
@@ -1390,12 +1430,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	if (skb == NULL)
 		goto out;
 
-	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
-	if (siocb->scm->fp) {
-		err = unix_attach_fds(siocb->scm, skb);
-		if (err)
-			goto out_free;
-	}
+	err = unix_scm_to_skb(siocb->scm, skb, true);
+	if (err)
+		goto out_free;
 	unix_get_secdata(siocb->scm, skb);
 
 	skb_reset_transport_header(skb);
@@ -1474,6 +1511,8 @@ restart:
 		goto restart;
 	}
 
+	if (sock_flag(other, SOCK_RCVTSTAMP))
+		__net_timestamp(skb);
 	skb_queue_tail(&other->sk_receive_queue, skb);
 	unix_state_unlock(other);
 	other->sk_data_ready(other, len);
@@ -1565,16 +1604,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		 */
 		size = min_t(int, size, skb_tailroom(skb));
 
-		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+
 		/* Only send the fds in the first buffer */
-		if (siocb->scm->fp && !fds_sent) {
-			err = unix_attach_fds(siocb->scm, skb);
-			if (err) {
-				kfree_skb(skb);
-				goto out_err;
-			}
-			fds_sent = true;
+		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+		if (err) {
+			kfree_skb(skb);
+			goto out_err;
 		}
+		fds_sent = true;
 
 		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
 		if (err) {
@@ -1687,11 +1724,14 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (err)
 		goto out_free;
 
+	if (sock_flag(sk, SOCK_RCVTSTAMP))
+		__sock_recv_timestamp(msg, sk, skb);
+
 	if (!siocb->scm) {
 		siocb->scm = &tmp_scm;
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
 	}
-	siocb->scm->creds = *UNIXCREDS(skb);
+	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 	unix_set_secdata(siocb->scm, skb);
 
 	if (!(flags & MSG_PEEK)) {
@@ -1736,7 +1776,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
 	unix_state_lock(sk);
 
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 		if (!skb_queue_empty(&sk->sk_receive_queue) ||
 		    sk->sk_err ||
@@ -1752,7 +1792,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	unix_state_unlock(sk);
 	return timeo;
 }
@@ -1840,14 +1880,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		if (check_creds) {
 			/* Never glue messages from different writers */
-			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
-				   sizeof(siocb->scm->creds)) != 0) {
+			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+			    (UNIXCB(skb).cred != siocb->scm->cred)) {
 				skb_queue_head(&sk->sk_receive_queue, skb);
 				break;
 			}
 		} else {
 			/* Copy credentials */
-			siocb->scm->creds = *UNIXCREDS(skb);
+			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 			check_creds = 1;
 		}
 
@@ -1880,7 +1920,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			break;
 		}
 
-		kfree_skb(skb);
+		consume_skb(skb);
 
 		if (siocb->scm->fp)
 			break;
@@ -1931,12 +1971,10 @@ static int unix_shutdown(struct socket *sock, int mode)
 		other->sk_shutdown |= peer_mode;
 		unix_state_unlock(other);
 		other->sk_state_change(other);
-		read_lock(&other->sk_callback_lock);
 		if (peer_mode == SHUTDOWN_MASK)
 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
 		else if (peer_mode & RCV_SHUTDOWN)
 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-		read_unlock(&other->sk_callback_lock);
 	}
 	if (other)
 		sock_put(other);
@@ -1991,7 +2029,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2000,11 +2038,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
@@ -2028,7 +2065,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk, *other;
 	unsigned int mask, writable;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2224,7 +2261,7 @@ static const struct net_proto_family unix_family_ops = {
 };
 
 
-static int unix_net_init(struct net *net)
+static int __net_init unix_net_init(struct net *net)
 {
 	int error = -ENOMEM;
 
@@ -2243,7 +2280,7 @@ out:
 	return error;
 }
 
-static void unix_net_exit(struct net *net)
+static void __net_exit unix_net_exit(struct net *net)
 {
 	unix_sysctl_unregister(net);
 	proc_net_remove(net, "unix");