author    Haishuang Yan <yanhaishuang@cmss.chinamobile.com>  2017-09-26 23:35:42 -0400
committer David S. Miller <davem@davemloft.net>  2017-10-01 20:55:54 -0400
commit    437138485656c41e32b8c63c0987cfa0348be0e6 (patch)
tree      942ae784a68f7d351f62fd3ac48c1330d629ce31 /net/ipv4/tcp_fastopen.c
parent    dd000598a39b6937fcefdf143720ec9fb5250e72 (diff)
ipv4: Namespaceify tcp_fastopen_key knob
Applications in different namespaces might require different tcp_fastopen_key values, independent of the host.

David Miller pointed out that there is a leak: the tcp_fastopen_key context is not released during netns teardown. So add the release action in the exit_batch path.

Tested:
1. Container namespace:
# cat /proc/sys/net/ipv4/tcp_fastopen_key
2817fff2-f803cf97-eadfd1f3-78c0992b

Cookie key in TCP SYN packets:
Fast Open Cookie
    Kind: TCP Fast Open Cookie (34)
    Length: 10
    Fast Open Cookie: 1e5dd82a8c492ca9

2. Host:
# cat /proc/sys/net/ipv4/tcp_fastopen_key
107d7c5f-68eb2ac7-02fb06e6-ed341702

Cookie key in TCP SYN packets:
Fast Open Cookie
    Kind: TCP Fast Open Cookie (34)
    Length: 10
    Fast Open Cookie: e213c02bf0afbc8a

Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
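As a quick way to reproduce the test above, the following small userspace program (illustration only, not part of the patch) prints the Fast Open key of whatever network namespace it runs in. Once this patch is applied, running it on the host and inside a container should print different keys:

/* tfo_key_dump.c - illustration only, not part of this commit.
 * Prints the TCP Fast Open key of the current network namespace.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_key", "r");
	char key[128];

	if (!f) {
		perror("open tcp_fastopen_key");
		return 1;
	}
	if (fgets(key, sizeof(key), f))
		printf("this namespace's TFO key: %s", key);
	fclose(f);
	return 0;
}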
Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r--  net/ipv4/tcp_fastopen.c | 64
1 file changed, 43 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8c8f0f0af59d..4eae44ac3cb0 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -9,13 +9,18 @@
 #include <net/inetpeer.h>
 #include <net/tcp.h>
 
-struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
-
-static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
-
-void tcp_fastopen_init_key_once(void)
+void tcp_fastopen_init_key_once(struct net *net)
 {
-	static u8 key[TCP_FASTOPEN_KEY_LENGTH];
+	u8 key[TCP_FASTOPEN_KEY_LENGTH];
+	struct tcp_fastopen_context *ctxt;
+
+	rcu_read_lock();
+	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+	if (ctxt) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
 
 	/* tcp_fastopen_reset_cipher publishes the new context
 	 * atomically, so we allow this race happening here.
@@ -23,8 +28,8 @@ void tcp_fastopen_init_key_once(void)
 	 * All call sites of tcp_fastopen_cookie_gen also check
 	 * for a valid cookie, so this is an acceptable risk.
 	 */
-	if (net_get_random_once(key, sizeof(key)))
-		tcp_fastopen_reset_cipher(key, sizeof(key));
+	get_random_bytes(key, sizeof(key));
+	tcp_fastopen_reset_cipher(net, key, sizeof(key));
 }
 
 static void tcp_fastopen_ctx_free(struct rcu_head *head)
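The two hunks above keep the original "benign race" design: concurrent callers of tcp_fastopen_init_key_once() may each generate a key, but tcp_fastopen_reset_cipher() publishes atomically and the losing context is freed through RCU, so the only cost is a wasted key generation. A userspace analogue of that tolerance (illustrative only; the kernel uses a spinlock plus RCU, not a bare compare-exchange, and all names below are invented for the sketch):

/* Both racers may build a context; exactly one publish wins and the
 * loser's work is simply discarded.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct ctx { unsigned char key[16]; };
static _Atomic(struct ctx *) live_ctx;

static void init_key_once(void)
{
	struct ctx *expected = NULL;
	struct ctx *mine;

	if (atomic_load(&live_ctx))	/* fast path: already keyed */
		return;
	mine = calloc(1, sizeof(*mine));
	if (!mine)
		return;
	/* ... fill mine->key with random bytes here ... */
	if (!atomic_compare_exchange_strong(&live_ctx, &expected, mine))
		free(mine);	/* lost the race; harmless, as in the kernel */
}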
@@ -35,7 +40,22 @@ static void tcp_fastopen_ctx_free(struct rcu_head *head)
 	kfree(ctx);
 }
 
-int tcp_fastopen_reset_cipher(void *key, unsigned int len)
+void tcp_fastopen_ctx_destroy(struct net *net)
+{
+	struct tcp_fastopen_context *ctxt;
+
+	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
+
+	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
+			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
+	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
+	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
+
+	if (ctxt)
+		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
+}
+
+int tcp_fastopen_reset_cipher(struct net *net, void *key, unsigned int len)
 {
 	int err;
 	struct tcp_fastopen_context *ctx, *octx;
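tcp_fastopen_ctx_destroy() above is the fix for the leak David Miller pointed out: it detaches the per-netns context under the lock and defers the free to RCU. Its caller is the netns exit_batch path mentioned in the commit message; a sketch of that wiring, assuming the hook named there (the actual hunk lives in net/ipv4/tcp_ipv4.c and is not shown on this page):

/* Sketch only; see net/ipv4/tcp_ipv4.c in the full patch for the
 * real change to the batched netns exit path.
 */
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
	/* ... existing batched TCP teardown continues here ... */
}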
@@ -59,26 +79,27 @@ error:	kfree(ctx);
 	}
 	memcpy(ctx->key, key, len);
 
-	spin_lock(&tcp_fastopen_ctx_lock);
+	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
 
-	octx = rcu_dereference_protected(tcp_fastopen_ctx,
-				lockdep_is_held(&tcp_fastopen_ctx_lock));
-	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
-	spin_unlock(&tcp_fastopen_ctx_lock);
+	octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
+		lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
+	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
 
 	if (octx)
 		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
 	return err;
 }
 
-static bool __tcp_fastopen_cookie_gen(const void *path,
+static bool __tcp_fastopen_cookie_gen(struct net *net,
+				      const void *path,
 				      struct tcp_fastopen_cookie *foc)
 {
 	struct tcp_fastopen_context *ctx;
 	bool ok = false;
 
 	rcu_read_lock();
-	ctx = rcu_dereference(tcp_fastopen_ctx);
+	ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
 	if (ctx) {
 		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
@@ -94,7 +115,8 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
  *
  * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
  */
-static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+static bool tcp_fastopen_cookie_gen(struct net *net,
+				    struct request_sock *req,
 				    struct sk_buff *syn,
 				    struct tcp_fastopen_cookie *foc)
 {
@@ -102,7 +124,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
 		const struct iphdr *iph = ip_hdr(syn);
 
 		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
-		return __tcp_fastopen_cookie_gen(path, foc);
+		return __tcp_fastopen_cookie_gen(net, path, foc);
 	}
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -110,13 +132,13 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
 		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
 		struct tcp_fastopen_cookie tmp;
 
-		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+		if (__tcp_fastopen_cookie_gen(net, &ip6h->saddr, &tmp)) {
 			struct in6_addr *buf = &tmp.addr;
 			int i;
 
 			for (i = 0; i < 4; i++)
 				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
-			return __tcp_fastopen_cookie_gen(buf, foc);
+			return __tcp_fastopen_cookie_gen(net, buf, foc);
 		}
 	}
 #endif
@@ -296,7 +318,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 		goto fastopen;
 
 	if (foc->len >= 0 &&	/* Client presents or requests a cookie */
-	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+	    tcp_fastopen_cookie_gen(sock_net(sk), req, skb, &valid_foc) &&
 	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
 	    foc->len == valid_foc.len &&
 	    !memcmp(foc->val, valid_foc.val, foc->len)) {
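For completeness, what ultimately consumes the per-namespace key is cookie generation for Fast Open listeners; note the sock_net(sk) argument threaded through in the last hunk, which ties validation to the listening socket's own namespace. A minimal listener sketch (illustration only; the port and backlog are arbitrary):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int qlen = 5;			/* max queued TFO requests */
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;	/* INADDR_ANY via the memset */
	addr.sin_port = htons(8080);	/* arbitrary demo port */

	if (fd < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) ||
	    listen(fd, 16)) {
		perror("tfo listener");
		return 1;
	}
	printf("TFO listener up; cookies derive from this netns's key\n");
	return 0;
}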