author		Eric Dumazet <edumazet@google.com>	2015-03-11 21:53:14 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-11 21:55:28 -0400
commit		33cf7c90fe2f97afb1cadaa0cfb782cb9d1b9ee2 (patch)
tree		7a0c80d0b2bb618919d966ce5b827c7eb8f843f6 /net/ipv4
parent		654eff45166c7e89d18fc476325c975768b2e347 (diff)
net: add real socket cookies
A long-standing problem in netlink socket dumps is the use of kernel socket
addresses as cookies.

1) It is a security concern.

2) Sockets can be reused quite quickly, so there is no guarantee a cookie
   is used once and identifies a single flow.

3) The request sock, established sock, and timewait socks for a given flow
   carry different cookies.

Part of our effort to bring better TCP statistics requires switching to a
different allocator.

In this patch, I chose to use a per network namespace 64-bit generator,
and to use it only when a socket needs to be dumped to netlink. (This
might be refined later if needed.)

Note that I tried to carry cookies from request sock, to established sock,
then to timewait sockets.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Eric Salo <salo@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
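For readers who want the shape of the scheme described above, here is a minimal
userspace sketch, not the kernel implementation: a per network namespace 64-bit
generator, with 0 reserved to mean "no cookie assigned yet" and the real value
drawn lazily the first time the socket is dumped. The struct layouts and the
get_cookie() helper are illustrative assumptions only.

/*
 * Minimal sketch only: a per-namespace 64-bit cookie generator with
 * lazy assignment.  These structures are invented for the example and
 * are not the kernel's struct net / struct sock.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct netns {
	_Atomic uint64_t cookie_gen;	/* per-namespace generator */
};

struct sock {
	struct netns *net;
	_Atomic uint64_t cookie;	/* 0 means "not assigned yet" */
};

/* Return the socket's cookie, drawing one from the namespace generator
 * on first use.  A compare-and-swap keeps the value stable even if two
 * dumpers race on the same socket. */
static uint64_t get_cookie(struct sock *sk)
{
	uint64_t res = atomic_load(&sk->cookie);

	while (res == 0) {
		uint64_t val = atomic_fetch_add(&sk->net->cookie_gen, 1) + 1;
		uint64_t expected = 0;

		if (atomic_compare_exchange_strong(&sk->cookie, &expected, val))
			return val;
		res = atomic_load(&sk->cookie);	/* another dumper won */
	}
	return res;
}

int main(void)
{
	struct netns ns = { .cookie_gen = 0 };
	struct sock a = { .net = &ns, .cookie = 0 };
	struct sock b = { .net = &ns, .cookie = 0 };

	/* The same socket keeps its cookie; a different socket gets a fresh one. */
	printf("a=%llu a=%llu b=%llu\n",
	       (unsigned long long)get_cookie(&a),
	       (unsigned long long)get_cookie(&a),
	       (unsigned long long)get_cookie(&b));
	return 0;
}

Since a fresh request sock starts with ir_cookie set to 0 (see the tcp_input.c
hunk below) and the value is copied forward when the child and timewait sockets
are created, the same 64-bit identity survives the whole lifetime of the flow.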
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/inet_connection_sock.c	 2
-rw-r--r--	net/ipv4/inet_diag.c		14
-rw-r--r--	net/ipv4/inet_timewait_sock.c	 1
-rw-r--r--	net/ipv4/syncookies.c		 1
-rw-r--r--	net/ipv4/tcp_input.c		 2
5 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 14d02ea905b6..34581f928afa 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -678,6 +678,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 		newsk->sk_write_space = sk_stream_write_space;
 
 		newsk->sk_mark = inet_rsk(req)->ir_mark;
+		atomic64_set(&newsk->sk_cookie,
+			     atomic64_read(&inet_rsk(req)->ir_cookie));
 
 		newicsk->icsk_retransmits = 0;
 		newicsk->icsk_backoff	  = 0;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ac3bfb458afd..29317ff4a007 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -221,12 +221,13 @@ static int inet_csk_diag_fill(struct sock *sk,
 			    user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+static int inet_twsk_diag_fill(struct sock *sk,
 			       struct sk_buff *skb,
 			       const struct inet_diag_req_v2 *req,
 			       u32 portid, u32 seq, u16 nlmsg_flags,
 			       const struct nlmsghdr *unlh)
 {
+	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct inet_diag_msg *r;
 	struct nlmsghdr *nlh;
 	s32 tmo;
@@ -247,7 +248,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 	r->idiag_retrans      = 0;
 
 	r->id.idiag_if	      = tw->tw_bound_dev_if;
-	sock_diag_save_cookie(tw, r->id.idiag_cookie);
+	sock_diag_save_cookie(sk, r->id.idiag_cookie);
 
 	r->id.idiag_sport     = tw->tw_sport;
 	r->id.idiag_dport     = tw->tw_dport;
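An aside on the dump format: the inet_diag socket id exposes the cookie as two
32-bit words (idiag_cookie[2]), so the 64-bit sk_cookie has to be split on save
and recombined by the reader. A small standalone sketch of that packing,
assuming the usual low-word-first layout; save_cookie() and read_cookie() are
illustrative helpers, not the kernel functions.

/*
 * Illustrative helpers only: split a 64-bit cookie across the two
 * 32-bit slots of an idiag_cookie-style array and recombine it.
 */
#include <stdint.h>
#include <stdio.h>

static void save_cookie(uint64_t cookie, uint32_t out[2])
{
	out[0] = (uint32_t)cookie;		/* low 32 bits  */
	out[1] = (uint32_t)(cookie >> 32);	/* high 32 bits */
}

static uint64_t read_cookie(const uint32_t in[2])
{
	return (uint64_t)in[0] | ((uint64_t)in[1] << 32);
}

int main(void)
{
	uint32_t id[2];

	save_cookie(0x1122334455667788ULL, id);
	printf("0x%016llx\n", (unsigned long long)read_cookie(id));
	return 0;
}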
@@ -283,7 +284,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
 			const struct nlmsghdr *unlh)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
-		return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+		return inet_twsk_diag_fill(sk, skb, r, portid, seq,
 					   nlmsg_flags, unlh);
 
 	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
@@ -675,7 +676,7 @@ static int inet_twsk_diag_dump(struct sock *sk,
 	if (!inet_diag_bc_sk(bc, sk))
 		return 0;
 
-	return inet_twsk_diag_fill(inet_twsk(sk), skb, r,
+	return inet_twsk_diag_fill(sk, skb, r,
 				   NETLINK_CB(cb->skb).portid,
 				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -734,7 +735,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 	r->idiag_retrans = req->num_retrans;
 
 	r->id.idiag_if = sk->sk_bound_dev_if;
-	sock_diag_save_cookie(req, r->id.idiag_cookie);
+
+	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
+		     offsetof(struct sock, sk_cookie));
+	sock_diag_save_cookie((struct sock *)ireq, r->id.idiag_cookie);
 
 	tmo = req->expires - jiffies;
 	if (tmo < 0)
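The hunk above hands sock_diag_save_cookie() a request sock cast to
struct sock *, which is only safe because the BUILD_BUG_ON() pins ir_cookie to
the same offset as sk_cookie, so the helper reads the right field through
either type. Below is a standalone sketch of the same compile-time guard using
C11 static_assert, with struct layouts invented for the example.

/*
 * Invented layouts: a "full" object and a slimmer one sharing a common
 * prefix so it can be handed to code expecting the full type.
 */
#include <assert.h>	/* static_assert */
#include <stddef.h>	/* offsetof */
#include <stdint.h>
#include <stdio.h>

struct full_sock {
	uint64_t cookie;	/* must stay at the same offset ... */
	int	 state;
};

struct mini_sock {
	uint64_t cookie;	/* ... as this one */
	int	 flags;
};

/* Compile-time guard, same idea as the BUILD_BUG_ON() in the patch. */
static_assert(offsetof(struct mini_sock, cookie) ==
	      offsetof(struct full_sock, cookie),
	      "cookie must sit at the same offset in both structs");

/* Consumer that only touches the shared prefix. */
static uint64_t cookie_of(const struct full_sock *sk)
{
	return sk->cookie;
}

int main(void)
{
	struct mini_sock m = { .cookie = 42, .flags = 0 };

	/* Cast through the shared prefix, mirroring (struct sock *)ireq;
	 * the static_assert above is what keeps this honest. */
	printf("%llu\n",
	       (unsigned long long)cookie_of((const struct full_sock *)&m));
	return 0;
}

The assert turns any future layout change into a build failure instead of a
silently corrupted cookie.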
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 6d592f8555fb..2bd980526631 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -195,6 +195,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 		tw->tw_ipv6only	    = 0;
 		tw->tw_transparent  = inet->transparent;
 		tw->tw_prot	    = sk->sk_prot_creator;
+		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
 		twsk_net_set(tw, hold_net(sock_net(sk)));
 		/*
 		 * Because we use RCU lookups, we should not set tw_refcnt
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 45fe60c5238e..ece31b426013 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -346,6 +346,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
 	treq->listener		= NULL;
+	ireq->ireq_net		= sock_net(sk);
 
 	/* We throwed the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fb4cf8b8e121..d7045f5f6ebf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5965,6 +5965,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 	tcp_openreq_init(req, &tmp_opt, skb, sk);
+	inet_rsk(req)->ireq_net = sock_net(sk);
+	atomic64_set(&inet_rsk(req)->ir_cookie, 0);
 
 	af_ops->init_req(req, sk, skb);
 