aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2015-03-16 00:12:14 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-16 15:55:29 -0400
commita58917f584e776b9fe31ef2a8bf617f253378dc0 (patch)
treea7badd202ec61cfd201c6e5fda87295932bc2770
parentf7e4eb03f9d9e2522bdd5107f37f9cf1af0bf0fa (diff)
inet_diag: allow sk_diag_fill() to handle request socks
inet_diag_fill_req() is renamed to inet_req_diag_fill() and moved up, so that it can be called from sk_diag_fill(). inet_diag_bc_sk() is ready to handle request socks. inet_twsk_diag_dump() is no longer needed. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/ipv4/inet_diag.c | 120
1 files changed, 53 insertions, 67 deletions
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ac7b5c909fe7..e7ba59038c8d 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -113,14 +113,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
113 return -EMSGSIZE; 113 return -EMSGSIZE;
114 114
115 r = nlmsg_data(nlh); 115 r = nlmsg_data(nlh);
116 BUG_ON((1 << sk->sk_state) & (TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV)); 116 BUG_ON(!sk_fullsock(sk));
117 117
118 inet_diag_msg_common_fill(r, sk); 118 inet_diag_msg_common_fill(r, sk);
119 r->idiag_state = sk->sk_state; 119 r->idiag_state = sk->sk_state;
120 r->idiag_timer = 0; 120 r->idiag_timer = 0;
121 r->idiag_retrans = 0; 121 r->idiag_retrans = 0;
122 122
123
124 if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) 123 if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
125 goto errout; 124 goto errout;
126 125
@@ -229,7 +228,6 @@ static int inet_csk_diag_fill(struct sock *sk,
229 228
230static int inet_twsk_diag_fill(struct sock *sk, 229static int inet_twsk_diag_fill(struct sock *sk,
231 struct sk_buff *skb, 230 struct sk_buff *skb,
232 const struct inet_diag_req_v2 *req,
233 u32 portid, u32 seq, u16 nlmsg_flags, 231 u32 portid, u32 seq, u16 nlmsg_flags,
234 const struct nlmsghdr *unlh) 232 const struct nlmsghdr *unlh)
235{ 233{
@@ -265,6 +263,39 @@ static int inet_twsk_diag_fill(struct sock *sk,
265 return 0; 263 return 0;
266} 264}
267 265
266static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
267 u32 portid, u32 seq, u16 nlmsg_flags,
268 const struct nlmsghdr *unlh)
269{
270 struct inet_diag_msg *r;
271 struct nlmsghdr *nlh;
272 long tmo;
273
274 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
275 nlmsg_flags);
276 if (!nlh)
277 return -EMSGSIZE;
278
279 r = nlmsg_data(nlh);
280 inet_diag_msg_common_fill(r, sk);
281 r->idiag_state = TCP_SYN_RECV;
282 r->idiag_timer = 1;
283 r->idiag_retrans = inet_reqsk(sk)->num_retrans;
284
285 BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
286 offsetof(struct sock, sk_cookie));
287
288 tmo = inet_reqsk(sk)->expires - jiffies;
289 r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
290 r->idiag_rqueue = 0;
291 r->idiag_wqueue = 0;
292 r->idiag_uid = 0;
293 r->idiag_inode = 0;
294
295 nlmsg_end(skb, nlh);
296 return 0;
297}
298
268static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 299static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
269 const struct inet_diag_req_v2 *r, 300 const struct inet_diag_req_v2 *r,
270 struct user_namespace *user_ns, 301 struct user_namespace *user_ns,
@@ -272,9 +303,13 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
272 const struct nlmsghdr *unlh) 303 const struct nlmsghdr *unlh)
273{ 304{
274 if (sk->sk_state == TCP_TIME_WAIT) 305 if (sk->sk_state == TCP_TIME_WAIT)
275 return inet_twsk_diag_fill(sk, skb, r, portid, seq, 306 return inet_twsk_diag_fill(sk, skb, portid, seq,
276 nlmsg_flags, unlh); 307 nlmsg_flags, unlh);
277 308
309 if (sk->sk_state == TCP_NEW_SYN_RECV)
310 return inet_req_diag_fill(sk, skb, portid, seq,
311 nlmsg_flags, unlh);
312
278 return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, 313 return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
279 nlmsg_flags, unlh); 314 nlmsg_flags, unlh);
280} 315}
@@ -502,7 +537,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
502 entry_fill_addrs(&entry, sk); 537 entry_fill_addrs(&entry, sk);
503 entry.sport = inet->inet_num; 538 entry.sport = inet->inet_num;
504 entry.dport = ntohs(inet->inet_dport); 539 entry.dport = ntohs(inet->inet_dport);
505 entry.userlocks = (sk->sk_state != TCP_TIME_WAIT) ? sk->sk_userlocks : 0; 540 entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
506 541
507 return inet_diag_bc_run(bc, &entry); 542 return inet_diag_bc_run(bc, &entry);
508} 543}
@@ -661,61 +696,6 @@ static void twsk_build_assert(void)
661#endif 696#endif
662} 697}
663 698
664static int inet_twsk_diag_dump(struct sock *sk,
665 struct sk_buff *skb,
666 struct netlink_callback *cb,
667 const struct inet_diag_req_v2 *r,
668 const struct nlattr *bc)
669{
670 twsk_build_assert();
671
672 if (!inet_diag_bc_sk(bc, sk))
673 return 0;
674
675 return inet_twsk_diag_fill(sk, skb, r,
676 NETLINK_CB(cb->skb).portid,
677 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
678}
679
680static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
681 struct request_sock *req,
682 struct user_namespace *user_ns,
683 u32 portid, u32 seq,
684 const struct nlmsghdr *unlh)
685{
686 const struct inet_request_sock *ireq = inet_rsk(req);
687 struct inet_diag_msg *r;
688 struct nlmsghdr *nlh;
689 long tmo;
690
691 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
692 NLM_F_MULTI);
693 if (!nlh)
694 return -EMSGSIZE;
695
696 r = nlmsg_data(nlh);
697 inet_diag_msg_common_fill(r, (struct sock *)ireq);
698 r->idiag_state = TCP_SYN_RECV;
699 r->idiag_timer = 1;
700 r->idiag_retrans = req->num_retrans;
701
702 BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
703 offsetof(struct sock, sk_cookie));
704
705 tmo = req->expires - jiffies;
706 if (tmo < 0)
707 tmo = 0;
708
709 r->idiag_expires = jiffies_to_msecs(tmo);
710 r->idiag_rqueue = 0;
711 r->idiag_wqueue = 0;
712 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
713 r->idiag_inode = 0;
714
715 nlmsg_end(skb, nlh);
716 return 0;
717}
718
719static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, 699static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
720 struct netlink_callback *cb, 700 struct netlink_callback *cb,
721 const struct inet_diag_req_v2 *r, 701 const struct inet_diag_req_v2 *r,
@@ -769,10 +749,10 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
769 continue; 749 continue;
770 } 750 }
771 751
772 err = inet_diag_fill_req(skb, sk, req, 752 err = inet_req_diag_fill((struct sock *)req, skb,
773 sk_user_ns(NETLINK_CB(cb->skb).sk),
774 NETLINK_CB(cb->skb).portid, 753 NETLINK_CB(cb->skb).portid,
775 cb->nlh->nlmsg_seq, cb->nlh); 754 cb->nlh->nlmsg_seq,
755 NLM_F_MULTI, cb->nlh);
776 if (err < 0) { 756 if (err < 0) {
777 cb->args[3] = j + 1; 757 cb->args[3] = j + 1;
778 cb->args[4] = reqnum; 758 cb->args[4] = reqnum;
@@ -903,10 +883,16 @@ skip_listen_ht:
903 if (r->id.idiag_dport != sk->sk_dport && 883 if (r->id.idiag_dport != sk->sk_dport &&
904 r->id.idiag_dport) 884 r->id.idiag_dport)
905 goto next_normal; 885 goto next_normal;
906 if (sk->sk_state == TCP_TIME_WAIT) 886 twsk_build_assert();
907 res = inet_twsk_diag_dump(sk, skb, cb, r, bc); 887
908 else 888 if (!inet_diag_bc_sk(bc, sk))
909 res = inet_csk_diag_dump(sk, skb, cb, r, bc); 889 goto next_normal;
890
891 res = sk_diag_fill(sk, skb, r,
892 sk_user_ns(NETLINK_CB(cb->skb).sk),
893 NETLINK_CB(cb->skb).portid,
894 cb->nlh->nlmsg_seq, NLM_F_MULTI,
895 cb->nlh);
910 if (res < 0) { 896 if (res < 0) {
911 spin_unlock_bh(lock); 897 spin_unlock_bh(lock);
912 goto done; 898 goto done;