Diffstat (limited to 'net/unix')

 -rw-r--r--  net/unix/af_unix.c  110
 -rw-r--r--  net/unix/diag.c     115

 2 files changed, 117 insertions, 108 deletions
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 641f2e47f165..79981d97bc9c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,15 +115,24 @@
 #include <net/checksum.h>
 #include <linux/security.h>

-struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
 DEFINE_SPINLOCK(unix_table_lock);
 EXPORT_SYMBOL_GPL(unix_table_lock);
 static atomic_long_t unix_nr_socks;

-#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

-#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+static struct hlist_head *unix_sockets_unbound(void *addr)
+{
+	unsigned long hash = (unsigned long)addr;
+
+	hash ^= hash >> 16;
+	hash ^= hash >> 8;
+	hash %= UNIX_HASH_SIZE;
+	return &unix_socket_table[UNIX_HASH_SIZE + hash];
+}
+
+#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

 #ifdef CONFIG_SECURITY_NETWORK
 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
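Note on the hunk above: unbound sockets no longer all sit on a single unix_sockets_unbound chain. They are spread across the upper half of the doubled unix_socket_table by hashing the socket's kernel address, which is also why UNIX_ABSTRACT() now tests for a hash below UNIX_HASH_SIZE rather than one equal to it. A minimal userspace sketch of that bucket selection follows; unbound_bucket() is an illustrative name and UNIX_HASH_SIZE is assumed to be 256 here:

/*
 * Userspace sketch of the bucket selection added above; unbound_bucket()
 * is an illustrative name, and UNIX_HASH_SIZE is assumed to be 256.
 */
#include <stdio.h>

#define UNIX_HASH_SIZE 256

static unsigned long unbound_bucket(const void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return UNIX_HASH_SIZE + hash;	/* upper half of unix_socket_table */
}

int main(void)
{
	int dummy[4];
	int i;

	/* Nearby objects still tend to land in different buckets. */
	for (i = 0; i < 4; i++)
		printf("%p -> bucket %lu\n", (void *)&dummy[i],
		       unbound_bucket(&dummy[i]));
	return 0;
}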
@@ -645,7 +654,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
 	INIT_LIST_HEAD(&u->link);
 	mutex_init(&u->readlock); /* single task reading lock */
 	init_waitqueue_head(&u->peer_wait);
-	unix_insert_socket(unix_sockets_unbound, sk);
+	unix_insert_socket(unix_sockets_unbound(sk), sk);
 out:
 	if (sk == NULL)
 		atomic_long_dec(&unix_nr_socks);
@@ -2239,47 +2248,54 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 }

 #ifdef CONFIG_PROC_FS
-static struct sock *first_unix_socket(int *i)
+
+#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
-		if (!hlist_empty(&unix_socket_table[*i]))
-			return __sk_head(&unix_socket_table[*i]);
+	unsigned long offset = get_offset(*pos);
+	unsigned long bucket = get_bucket(*pos);
+	struct sock *sk;
+	unsigned long count = 0;
+
+	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+		if (sock_net(sk) != seq_file_net(seq))
+			continue;
+		if (++count == offset)
+			break;
 	}
-	return NULL;
+
+	return sk;
 }

-static struct sock *next_unix_socket(int *i, struct sock *s)
+static struct sock *unix_next_socket(struct seq_file *seq,
+				     struct sock *sk,
+				     loff_t *pos)
 {
-	struct sock *next = sk_next(s);
-	/* More in this chain? */
-	if (next)
-		return next;
-	/* Look for next non-empty chain. */
-	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
-		if (!hlist_empty(&unix_socket_table[*i]))
-			return __sk_head(&unix_socket_table[*i]);
+	unsigned long bucket;
+
+	while (sk > (struct sock *)SEQ_START_TOKEN) {
+		sk = sk_next(sk);
+		if (!sk)
+			goto next_bucket;
+		if (sock_net(sk) == seq_file_net(seq))
+			return sk;
 	}
-	return NULL;
-}

-struct unix_iter_state {
-	struct seq_net_private p;
-	int i;
-};
+	do {
+		sk = unix_from_bucket(seq, pos);
+		if (sk)
+			return sk;

-static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
-{
-	struct unix_iter_state *iter = seq->private;
-	loff_t off = 0;
-	struct sock *s;
+next_bucket:
+		bucket = get_bucket(*pos) + 1;
+		*pos = set_bucket_offset(bucket, 1);
+	} while (bucket < ARRAY_SIZE(unix_socket_table));

-	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
-		if (sock_net(s) != seq_file_net(seq))
-			continue;
-		if (off == pos)
-			return s;
-		++off;
-	}
 	return NULL;
 }

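Note on the hunk above: the /proc/net/unix iterator now encodes both the hash bucket and the offset within that bucket in the seq_file position, so resuming a read rescans only one chain instead of walking every socket from the start. A small userspace sketch of the encoding follows, assuming a 64-bit build and UNIX_HASH_BITS == 8; the values in main() are purely illustrative:

/*
 * Userspace sketch of the bucket/offset encoding used for *pos above.
 * Assumes a 64-bit build and UNIX_HASH_BITS == 8; the driver values
 * below are purely illustrative.
 */
#include <stdio.h>

#define BITS_PER_LONG	64
#define UNIX_HASH_BITS	8

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	/* bucket 300 (upper, unbound half), 7th matching socket in that chain */
	long pos = set_bucket_offset(300L, 7L);

	printf("pos=%ld bucket=%ld offset=%ld\n",
	       pos, get_bucket(pos), get_offset(pos));
	return 0;
}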
@@ -2287,22 +2303,20 @@ static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(unix_table_lock)
 {
 	spin_lock(&unix_table_lock);
-	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
+		return NULL;
+
+	return unix_next_socket(seq, NULL, pos);
 }

 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct unix_iter_state *iter = seq->private;
-	struct sock *sk = v;
 	++*pos;
-
-	if (v == SEQ_START_TOKEN)
-		sk = first_unix_socket(&iter->i);
-	else
-		sk = next_unix_socket(&iter->i, sk);
-	while (sk && (sock_net(sk) != seq_file_net(seq)))
-		sk = next_unix_socket(&iter->i, sk);
-	return sk;
+	return unix_next_socket(seq, v, pos);
 }

 static void unix_seq_stop(struct seq_file *seq, void *v)
@@ -2365,7 +2379,7 @@ static const struct seq_operations unix_seq_ops = {
 static int unix_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &unix_seq_ops,
-			sizeof(struct unix_iter_state));
+			sizeof(struct seq_net_private));
 }

 static const struct file_operations unix_seq_fops = {
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f5..750b13408449 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
 #include <net/af_unix.h>
 #include <net/tcp_states.h>

-#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
-	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct unix_address *addr = unix_sk(sk)->addr;
-	char *s;
-
-	if (addr) {
-		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
-		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
-	}

-	return 0;
+	if (!addr)
+		return 0;

-rtattr_failure:
-	return -EMSGSIZE;
+	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+		       addr->name->sun_path);
 }

 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct dentry *dentry = unix_sk(sk)->path.dentry;
-	struct unix_diag_vfs *uv;

 	if (dentry) {
-		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
-		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
-		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+		struct unix_diag_vfs uv = {
+			.udiag_vfs_ino = dentry->d_inode->i_ino,
+			.udiag_vfs_dev = dentry->d_sb->s_dev,
+		};
+
+		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
 	}

 	return 0;
-
-rtattr_failure:
-	return -EMSGSIZE;
 }

 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
 		unix_state_unlock(peer);
 		sock_put(peer);

-		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
 	}

 	return 0;
-rtattr_failure:
-	return -EMSGSIZE;
 }

 static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct sk_buff *skb;
+	struct nlattr *attr;
 	u32 *buf;
 	int i;

 	if (sk->sk_state == TCP_LISTEN) {
 		spin_lock(&sk->sk_receive_queue.lock);
-		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
-				    sk->sk_receive_queue.qlen * sizeof(u32));
+
+		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+				   sk->sk_receive_queue.qlen * sizeof(u32));
+		if (!attr)
+			goto errout;
+
+		buf = nla_data(attr);
 		i = 0;
 		skb_queue_walk(&sk->sk_receive_queue, skb) {
 			struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)

 	return 0;

-rtattr_failure:
+errout:
 	spin_unlock(&sk->sk_receive_queue.lock);
 	return -EMSGSIZE;
 }

 static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_diag_rqlen *rql;
-
-	rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+	struct unix_diag_rqlen rql;

 	if (sk->sk_state == TCP_LISTEN) {
-		rql->udiag_rqueue = sk->sk_receive_queue.qlen;
-		rql->udiag_wqueue = sk->sk_max_ack_backlog;
+		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+		rql.udiag_wqueue = sk->sk_max_ack_backlog;
 	} else {
-		rql->udiag_rqueue = (__u32)unix_inq_len(sk);
-		rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+		rql.udiag_rqueue = (u32) unix_inq_len(sk);
+		rql.udiag_wqueue = (u32) unix_outq_len(sk);
 	}

-	return 0;
-
-rtattr_failure:
-	return -EMSGSIZE;
+	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
 }

 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
 			u32 pid, u32 seq, u32 flags, int sk_ino)
 {
-	unsigned char *b = skb_tail_pointer(skb);
 	struct nlmsghdr *nlh;
 	struct unix_diag_msg *rep;

-	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
-	nlh->nlmsg_flags = flags;
-
-	rep = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+			flags);
+	if (!nlh)
+		return -EMSGSIZE;

+	rep = nlmsg_data(nlh);
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
 	rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r

 	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
 	    sk_diag_dump_name(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

 	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
 	    sk_diag_dump_vfs(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

 	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
 	    sk_diag_dump_peer(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

 	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
 	    sk_diag_dump_icons(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

 	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
 	    sk_diag_show_rqlen(sk, skb))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

 	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
-		goto nlmsg_failure;
+		goto out_nlmsg_trim;

-	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-	return skb->len;
+	return nlmsg_end(skb, nlh);

-nlmsg_failure:
-	nlmsg_trim(skb, b);
+out_nlmsg_trim:
+	nlmsg_cancel(skb, nlh);
 	return -EMSGSIZE;
 }

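Note on the conversion above: the old NLMSG_PUT()/RTA_PUT() macros bailed out by jumping to hidden nlmsg_failure/rtattr_failure labels, whereas nlmsg_put(), nla_put() and friends report failure through return values, with nlmsg_end() finalizing the message and nlmsg_cancel() rolling it back. A schematic, non-standalone sketch of that fill pattern follows; example_fill(), EXAMPLE_MSG_TYPE and EXAMPLE_ATTR are placeholders, not anything from this patch:

/*
 * Schematic sketch of the nlmsg/nla fill pattern used above; kernel-side
 * pseudocode, not a standalone program.  example_fill(), EXAMPLE_MSG_TYPE
 * and EXAMPLE_ATTR are placeholders.
 */
static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, u32 flags,
			u32 value)
{
	struct nlmsghdr *nlh;

	/* Reserve the netlink header plus a fixed u32 payload. */
	nlh = nlmsg_put(skb, pid, seq, EXAMPLE_MSG_TYPE, sizeof(u32), flags);
	if (!nlh)
		return -EMSGSIZE;

	*(u32 *)nlmsg_data(nlh) = value;	/* fixed part of the message */

	/* Optional attribute; nla_put_u32() returns -EMSGSIZE when full. */
	if (nla_put_u32(skb, EXAMPLE_ATTR, value))
		goto cancel;

	/* nlmsg_end() of this era returns the resulting skb->len. */
	return nlmsg_end(skb, nlh);

cancel:
	/* Roll back everything added since nlmsg_put(). */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}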
@@ -188,19 +177,24 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct unix_diag_req *req;
 	int num, s_num, slot, s_slot;
+	struct net *net = sock_net(skb->sk);

-	req = NLMSG_DATA(cb->nlh);
+	req = nlmsg_data(cb->nlh);

 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];

 	spin_lock(&unix_table_lock);
-	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+	for (slot = s_slot;
+	     slot < ARRAY_SIZE(unix_socket_table);
+	     s_num = 0, slot++) {
 		struct sock *sk;
 		struct hlist_node *node;

 		num = 0;
 		sk_for_each(sk, node, &unix_socket_table[slot]) {
+			if (!net_eq(sock_net(sk), net))
+				continue;
 			if (num < s_num)
 				goto next;
 			if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -228,7 +222,7 @@ static struct sock *unix_lookup_by_ino(int ino)
 	struct sock *sk;

 	spin_lock(&unix_table_lock);
-	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
 		struct hlist_node *node;

 		sk_for_each(sk, node, &unix_socket_table[i])
@@ -252,6 +246,7 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
 	struct sock *sk;
 	struct sk_buff *rep;
 	unsigned int extra_len;
+	struct net *net = sock_net(in_skb->sk);

 	if (req->udiag_ino == 0)
 		goto out_nosk;
@@ -268,22 +263,21 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
 	extra_len = 256;
 again:
 	err = -ENOMEM;
-	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
-			GFP_KERNEL);
+	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
 	if (!rep)
 		goto out;

 	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
 			   nlh->nlmsg_seq, 0, req->udiag_ino);
 	if (err < 0) {
-		kfree_skb(rep);
+		nlmsg_free(rep);
 		extra_len += 256;
 		if (extra_len >= PAGE_SIZE)
 			goto out;

 		goto again;
 	}
-	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
 			      MSG_DONTWAIT);
 	if (err > 0)
 		err = 0;
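Note on the hunk above: the exact-lookup path keeps its existing strategy of retrying with a progressively larger reply buffer whenever sk_diag_fill() overflows it; only the allocation and free calls change to nlmsg_new()/nlmsg_free(). A userspace sketch of that grow-and-retry pattern follows; try_fill(), the 64-byte header, the 256-byte step and the 4096-byte cap are illustrative stand-ins for sk_diag_fill(), the fixed message size, extra_len and PAGE_SIZE:

/*
 * Userspace sketch of the grow-and-retry pattern above.  try_fill(), the
 * 64-byte header, the 256-byte step and the 4096-byte cap stand in for
 * sk_diag_fill(), the fixed message size, extra_len and PAGE_SIZE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend the reply only fits once the buffer reaches 1000 bytes. */
static int try_fill(char *buf, size_t len)
{
	const size_t needed = 1000;

	if (len < needed)
		return -1;	/* the kernel would return -EMSGSIZE here */
	memset(buf, 0, needed);
	return (int)needed;
}

int main(void)
{
	size_t extra_len = 256;
	char *rep;
	int filled;

	for (;;) {
		rep = malloc(64 + extra_len);
		if (!rep)
			return 1;

		filled = try_fill(rep, 64 + extra_len);
		if (filled >= 0)
			break;

		free(rep);		/* too small: drop it and retry bigger */
		extra_len += 256;
		if (extra_len >= 4096)
			return 1;	/* give up, as the kernel does at PAGE_SIZE */
	}

	printf("reply filled with %d bytes (extra_len=%zu)\n", filled, extra_len);
	free(rep);
	return 0;
}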
@@ -297,6 +291,7 @@ out_nosk:
 static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
 	int hdrlen = sizeof(struct unix_diag_req);
+	struct net *net = sock_net(skb->sk);

 	if (nlmsg_len(h) < hdrlen)
 		return -EINVAL;
@@ -305,9 +300,9 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 		struct netlink_dump_control c = {
 			.dump = unix_diag_dump,
 		};
-		return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 	} else
-		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+		return unix_diag_get_exact(skb, h, nlmsg_data(h));
 }

 static const struct sock_diag_handler unix_diag_handler = {
