Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r-- | net/tipc/socket.c | 193
1 file changed, 141 insertions(+), 52 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b0323ec7971e..252a52ae0893 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -289,10 +289,9 @@ static bool tipc_sk_type_connectionless(struct sock *sk)
 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
 	struct sock *sk = &tsk->sk;
-	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
+	u32 self = tipc_own_addr(sock_net(sk));
 	u32 peer_port = tsk_peer_port(tsk);
-	u32 orig_node;
-	u32 peer_node;
+	u32 orig_node, peer_node;
 
 	if (unlikely(!tipc_sk_connected(sk)))
 		return false;
@@ -306,10 +305,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 	if (likely(orig_node == peer_node))
 		return true;
 
-	if (!orig_node && (peer_node == tn->own_addr))
+	if (!orig_node && peer_node == self)
 		return true;
 
-	if (!peer_node && (orig_node == tn->own_addr))
+	if (!peer_node && orig_node == self)
 		return true;
 
 	return false;
@@ -461,8 +460,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	/* Ensure tsk is visible before we read own_addr. */
 	smp_mb();
 
-	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-		      NAMED_H_SIZE, 0);
+	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
+		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
 
 	msg_set_origport(msg, tsk->portid);
 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
@@ -473,6 +472,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	sk->sk_write_space = tipc_write_space;
 	sk->sk_destruct = tipc_sock_destruct;
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+	tsk->group_is_open = true;
 	atomic_set(&tsk->dupl_rcvcnt, 0);
 
 	/* Start out with safe limits until we receive an advertised window */
@@ -643,7 +643,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
 		goto exit;
 	}
 
-	res = (addr->scope > 0) ?
+	res = (addr->scope >= 0) ?
 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
 exit:
@@ -665,12 +665,11 @@ exit:
  * a completely predictable manner).
  */
 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
-			int *uaddr_len, int peer)
+			int peer)
 {
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
 
 	memset(addr, 0, sizeof(*addr));
 	if (peer) {
@@ -681,16 +680,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 		addr->addr.id.node = tsk_peer_node(tsk);
 	} else {
 		addr->addr.id.ref = tsk->portid;
-		addr->addr.id.node = tn->own_addr;
+		addr->addr.id.node = tipc_own_addr(sock_net(sk));
 	}
 
-	*uaddr_len = sizeof(*addr);
 	addr->addrtype = TIPC_ADDR_ID;
 	addr->family = AF_TIPC;
 	addr->scope = 0;
 	addr->addr.name.domain = 0;
 
-	return 0;
+	return sizeof(*addr);
 }
 
 /**
@@ -1280,8 +1278,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_name_seq *seq;
 	struct sk_buff_head pkts;
-	u32 type, inst, domain;
-	u32 dnode, dport;
+	u32 dport, dnode = 0;
+	u32 type, inst;
 	int mtu, rc;
 
 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
@@ -1332,13 +1330,12 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 	if (dest->addrtype == TIPC_ADDR_NAME) {
 		type = dest->addr.name.name.type;
 		inst = dest->addr.name.name.instance;
-		domain = dest->addr.name.domain;
-		dnode = domain;
+		dnode = dest->addr.name.domain;
 		msg_set_type(hdr, TIPC_NAMED_MSG);
 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
 		msg_set_nametype(hdr, type);
 		msg_set_nameinst(hdr, inst);
-		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
+		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
 		msg_set_destnode(hdr, dnode);
 		msg_set_destport(hdr, dport);
@@ -1351,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 		msg_set_destnode(hdr, dnode);
 		msg_set_destport(hdr, dest->addr.id.ref);
 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+	} else {
+		return -EINVAL;
 	}
 
 	/* Block or return if destination link is congested */
@@ -2123,8 +2122,10 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 	    (!sk_conn && msg_connected(hdr)) ||
 	    (!grp && msg_in_group(hdr)))
 		err = TIPC_ERR_NO_PORT;
-	else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
+	else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+		atomic_inc(&sk->sk_drops);
 		err = TIPC_ERR_OVERLOAD;
+	}
 
 	if (unlikely(err)) {
 		tipc_skb_reject(net, err, skb, xmitq);
@@ -2203,6 +2204,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
 		/* Overload => reject message back to sender */
 		onode = tipc_own_addr(sock_net(sk));
+		atomic_inc(&sk->sk_drops);
 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
 			__skb_queue_tail(xmitq, skb);
 		break;
@@ -2592,6 +2594,9 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 	struct publication *publ;
 	u32 key;
 
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
 	if (tipc_sk_connected(sk))
 		return -EINVAL;
 	key = tsk->portid + tsk->pub_count + 1;
@@ -2603,7 +2608,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 	if (unlikely(!publ))
 		return -EINVAL;
 
-	list_add(&publ->pport_list, &tsk->publications);
+	list_add(&publ->binding_sock, &tsk->publications);
 	tsk->pub_count++;
 	tsk->published = 1;
 	return 0;
@@ -2617,7 +2622,10 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 	struct publication *safe;
 	int rc = -EINVAL;
 
-	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
+	if (scope != TIPC_NODE_SCOPE)
+		scope = TIPC_CLUSTER_SCOPE;
+
+	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
 		if (seq) {
 			if (publ->scope != scope)
 				continue;
@@ -2628,12 +2636,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 			if (publ->upper != seq->upper)
 				break;
 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
-					      publ->ref, publ->key);
+					      publ->upper, publ->key);
 			rc = 0;
 			break;
 		}
 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
-				      publ->ref, publ->key);
+				      publ->upper, publ->key);
 		rc = 0;
 	}
 	if (list_empty(&tsk->publications))
@@ -2659,8 +2667,8 @@ void tipc_sk_reinit(struct net *net)
 	while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 		spin_lock_bh(&tsk->sk.sk_lock.slock);
 		msg = &tsk->phdr;
-		msg_set_prevnode(msg, tn->own_addr);
-		msg_set_orignode(msg, tn->own_addr);
+		msg_set_prevnode(msg, tipc_own_addr(net));
+		msg_set_orignode(msg, tipc_own_addr(net));
 		spin_unlock_bh(&tsk->sk.sk_lock.slock);
 	}
 
@@ -3155,16 +3163,32 @@ msg_full:
 	return -EMSGSIZE;
 }
 
+static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
+			  *tsk)
+{
+	struct net *net = sock_net(skb->sk);
+	struct sock *sk = &tsk->sk;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
+		return -EMSGSIZE;
+
+	if (tipc_sk_connected(sk)) {
+		if (__tipc_nl_add_sk_con(skb, tsk))
+			return -EMSGSIZE;
+	} else if (!list_empty(&tsk->publications)) {
+		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+			return -EMSGSIZE;
+	}
+	return 0;
+}
+
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct tipc_sock *tsk)
 {
-	int err;
-	void *hdr;
 	struct nlattr *attrs;
-	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sock *sk = &tsk->sk;
+	void *hdr;
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
@@ -3174,19 +3198,10 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
 	if (!attrs)
 		goto genlmsg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
-		goto attr_msg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
 		goto attr_msg_cancel;
 
-	if (tipc_sk_connected(sk)) {
-		err = __tipc_nl_add_sk_con(skb, tsk);
-		if (err)
-			goto attr_msg_cancel;
-	} else if (!list_empty(&tsk->publications)) {
-		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
-			goto attr_msg_cancel;
-	}
 	nla_nest_end(skb, attrs);
 	genlmsg_end(skb, hdr);
 
@@ -3200,16 +3215,19 @@ msg_cancel:
 	return -EMSGSIZE;
 }
 
-int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+		    int (*skb_handler)(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct tipc_sock *tsk))
 {
-	int err;
-	struct tipc_sock *tsk;
-	const struct bucket_table *tbl;
-	struct rhash_head *pos;
 	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	u32 tbl_id = cb->args[0];
+	struct tipc_net *tn = tipc_net(net);
+	const struct bucket_table *tbl;
 	u32 prev_portid = cb->args[1];
+	u32 tbl_id = cb->args[0];
+	struct rhash_head *pos;
+	struct tipc_sock *tsk;
+	int err;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
@@ -3221,12 +3239,13 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		}
 
-		err = __tipc_nl_add_sk(skb, cb, tsk);
+		err = skb_handler(skb, cb, tsk);
 		if (err) {
 			prev_portid = tsk->portid;
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 			goto out;
 		}
+
 		prev_portid = 0;
 		spin_unlock_bh(&tsk->sk.sk_lock.slock);
 	}
@@ -3238,6 +3257,76 @@ out:
 
 	return skb->len;
 }
+EXPORT_SYMBOL(tipc_nl_sk_walk);
+
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct tipc_sock *tsk, u32 sk_filter_state,
+			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
+{
+	struct sock *sk = &tsk->sk;
+	struct nlattr *attrs;
+	struct nlattr *stat;
+
+	/*filter response w.r.t sk_state*/
+	if (!(sk_filter_state & (1 << sk->sk_state)))
+		return 0;
+
+	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
+	if (!attrs)
+		goto msg_cancel;
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
+			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
+					 sock_i_uid(sk))) ||
+	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
+			      tipc_diag_gen_cookie(sk),
+			      TIPC_NLA_SOCK_PAD))
+		goto attr_msg_cancel;
+
+	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
+	if (!stat)
+		goto attr_msg_cancel;
+
+	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
+			skb_queue_len(&sk->sk_receive_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
+			skb_queue_len(&sk->sk_write_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+			atomic_read(&sk->sk_drops)))
+		goto stat_msg_cancel;
+
+	if (tsk->cong_link_cnt &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
+		goto stat_msg_cancel;
+
+	if (tsk_conn_cong(tsk) &&
+	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
+		goto stat_msg_cancel;
+
+	nla_nest_end(skb, stat);
+	nla_nest_end(skb, attrs);
+
+	return 0;
+
+stat_msg_cancel:
+	nla_nest_cancel(skb, stat);
+attr_msg_cancel:
+	nla_nest_cancel(skb, attrs);
+msg_cancel:
+	return -EMSGSIZE;
+}
+EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
+}
 
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
@@ -3287,7 +3376,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 	struct publication *p;
 
 	if (*last_publ) {
-		list_for_each_entry(p, &tsk->publications, pport_list) {
+		list_for_each_entry(p, &tsk->publications, binding_sock) {
 			if (p->key == *last_publ)
 				break;
 		}
@@ -3304,10 +3393,10 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 		}
 	} else {
 		p = list_first_entry(&tsk->publications, struct publication,
-				     pport_list);
+				     binding_sock);
 	}
 
-	list_for_each_entry_from(p, &tsk->publications, pport_list) {
+	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
 		err = __tipc_nl_add_sk_publ(skb, cb, p);
 		if (err) {
 			*last_publ = p->key;