Diffstat (limited to 'net/unix/af_unix.c')
-rw-r--r--   net/unix/af_unix.c   39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4414a18c63b..3c95304a081 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -117,7 +117,7 @@
 
 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 static DEFINE_SPINLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
 
@@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk)
 	if (u->addr)
 		unix_release_addr(u->addr);
 
-	atomic_dec(&unix_nr_socks);
+	atomic_long_dec(&unix_nr_socks);
 	local_bh_disable();
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 #ifdef UNIX_REFCNT_DEBUG
-	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
-		atomic_read(&unix_nr_socks));
+	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+		atomic_long_read(&unix_nr_socks));
 #endif
 }
 
@@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
 	struct sock *sk = NULL;
 	struct unix_sock *u;
 
-	atomic_inc(&unix_nr_socks);
-	if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
+	atomic_long_inc(&unix_nr_socks);
+	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
 		goto out;
 
 	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
@@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
 	unix_insert_socket(unix_sockets_unbound, sk);
 out:
 	if (sk == NULL)
-		atomic_dec(&unix_nr_socks);
+		atomic_long_dec(&unix_nr_socks);
 	else {
 		local_bh_disable();
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock)
 	static u32 ordernum = 1;
 	struct unix_address *addr;
 	int err;
+	unsigned int retries = 0;
 
 	mutex_lock(&u->readlock);
 
@@ -717,9 +718,17 @@ retry:
 	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
 				      addr->hash)) {
 		spin_unlock(&unix_table_lock);
-		/* Sanity yield. It is unusual case, but yet... */
-		if (!(ordernum&0xFF))
-			yield();
+		/*
+		 * __unix_find_socket_byname() may take long time if many names
+		 * are already in use.
+		 */
+		cond_resched();
+		/* Give up if all names seems to be in use. */
+		if (retries++ == 0xFFFFF) {
+			err = -ENOSPC;
+			kfree(addr);
+			goto out;
+		}
 		goto retry;
 	}
 	addr->hash ^= sk->sk_type;
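
Note: the autobind hunk above caps the name search at 0xFFFFF retries and returns -ENOSPC instead of looping with an occasional yield(). For reference, unix_autobind() is reachable from user space by binding an AF_UNIX datagram socket with only the address family filled in and addrlen == sizeof(sa_family_t); the kernel then picks a free 5-hex-digit abstract name. A minimal userspace sketch (hypothetical, not part of this patch):

/* Hypothetical illustration: trigger unix_autobind() via bind() with
 * addrlen == sizeof(sa_family_t) and read back the abstract name the
 * kernel assigned.  With the change above, the search cond_resched()s
 * and eventually fails with ENOSPC when the roughly 2^20 abstract
 * names are all in use, instead of spinning forever. */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sa = { .sun_family = AF_UNIX };
	struct sockaddr_un out;
	socklen_t len = sizeof(out);
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	/* Passing only the family asks the kernel to autobind. */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa_family_t)) < 0) {
		perror("bind");	/* ENOSPC once the namespace is exhausted */
		return 1;
	}
	getsockname(fd, (struct sockaddr *)&out, &len);
	/* Abstract names begin with a NUL byte; the hex digits follow it. */
	printf("autobound to abstract name \"%.5s\"\n", out.sun_path + 1);
	close(fd);
	return 0;
}
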
@@ -1502,6 +1511,8 @@ restart:
 		goto restart;
 	}
 
+	if (sock_flag(other, SOCK_RCVTSTAMP))
+		__net_timestamp(skb);
 	skb_queue_tail(&other->sk_receive_queue, skb);
 	unix_state_unlock(other);
 	other->sk_data_ready(other, len);
@@ -1713,6 +1724,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (err)
 		goto out_free;
 
+	if (sock_flag(sk, SOCK_RCVTSTAMP))
+		__sock_recv_timestamp(msg, sk, skb);
+
 	if (!siocb->scm) {
 		siocb->scm = &tmp_scm;
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
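
The two SOCK_RCVTSTAMP hunks above are what let SO_TIMESTAMP work on AF_UNIX datagram sockets: the send path stamps the skb only if the receiving socket already has the flag set, and unix_dgram_recvmsg() then emits the stamp as an SCM_TIMESTAMP control message. A rough receive-side sketch (hypothetical, not part of this patch, using only standard socket API calls):

/* Hypothetical receive-side sketch: with the hunks above applied,
 * SO_TIMESTAMP on an AF_UNIX datagram socket yields an SCM_TIMESTAMP
 * cmsg carrying the time the sender queued the datagram.  The option
 * should be enabled before the peer sends, since unix_dgram_sendmsg()
 * checks the receiver's flag at queueing time. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

void recv_with_timestamp(int fd)
{
	int on = 1;
	char data[256];
	char cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("queued at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
}
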
@@ -2024,11 +2038,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
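
Finally, the unix_poll() hunk keeps the userspace-visible contract, just expressed more directly: a socket whose peer has shut down writing is reported readable even with an empty receive queue, so a blocked reader wakes up and sees EOF. A hypothetical sketch of that contract, assuming a stream socketpair (not part of this patch):

/* Hypothetical sketch: once the peer has shut down its sending side,
 * poll() reports the socket readable although nothing is queued, and
 * the subsequent recv() returns 0 without blocking. */
#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	struct pollfd pfd;
	char buf[16];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	shutdown(sv[1], SHUT_WR);	/* peer stops sending; queue stays empty */

	pfd.fd = sv[0];
	pfd.events = POLLIN | POLLRDNORM | POLLRDHUP;
	poll(&pfd, 1, -1);		/* returns immediately */

	printf("revents=0x%x recv=%zd\n",
	       (unsigned int)pfd.revents, recv(sv[0], buf, sizeof(buf), 0));
	/* Expect POLLIN|POLLRDNORM|POLLRDHUP in revents and recv() == 0. */
	return 0;
}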