path: root/net/ipv4/tcp_fastopen.c
author    Yuchung Cheng <ycheng@google.com>          2017-10-18 14:22:51 -0400
committer David S. Miller <davem@davemloft.net>      2017-10-20 08:21:36 -0400
commit    1fba70e5b6bed53496ba1f1f16127f5be01b5fb6 (patch)
tree      22b060a68ca7b36f052b8f943c91c7ba78b8ddcf /net/ipv4/tcp_fastopen.c
parent    ce12f7ddff2df63b8f9abf33d6fe020e35de4059 (diff)
tcp: socket option to set TCP fast open key
New socket option TCP_FASTOPEN_KEY to allow different keys per listener. The
listener by default uses the global key until the socket option is set. The
key is 16 bytes of binary data. This option has no effect on regular,
non-listener TCP sockets.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Christoph Paasch <cpaasch@apple.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
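
For illustration only (not part of this patch): a minimal user-space sketch of
how a listener could install its own key with the new option. It assumes
TCP_FASTOPEN_KEY is visible through the installed kernel headers (otherwise
define it by hand; the fallback value below is an assumption taken from the
uapi header this series touches), and the port, queue length, and key bytes
are placeholders.

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33     /* assumption: value from the uapi header */
#endif

int main(void)
{
        unsigned char key[16] = { 0 };  /* replace with 16 bytes of real key material */
        int qlen = 16;                  /* Fast Open server queue length */
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(8080),
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;

        /* Per-listener key: without this call the listener keeps using
         * the global (per-netns) key. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_KEY, key, sizeof(key)))
                perror("setsockopt TCP_FASTOPEN_KEY");

        /* Enable Fast Open on the listener as before. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)))
                perror("setsockopt TCP_FASTOPEN");

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) || listen(fd, 128))
                return 1;

        /* ... accept() connections as usual ... */
        close(fd);
        return 0;
}

Listeners that never call setsockopt(TCP_FASTOPEN_KEY) keep generating and
validating cookies with the per-netns key configured via the
net.ipv4.tcp_fastopen_key sysctl.
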
Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r--    net/ipv4/tcp_fastopen.c    56
1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 7ee4aadcdd71..21075ce19cb6 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -29,7 +29,7 @@ void tcp_fastopen_init_key_once(struct net *net)
 	 * for a valid cookie, so this is an acceptable risk.
 	 */
 	get_random_bytes(key, sizeof(key));
-	tcp_fastopen_reset_cipher(net, key, sizeof(key));
+	tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
 }
 
 static void tcp_fastopen_ctx_free(struct rcu_head *head)
@@ -40,6 +40,16 @@ static void tcp_fastopen_ctx_free(struct rcu_head *head)
 	kfree(ctx);
 }
 
+void tcp_fastopen_destroy_cipher(struct sock *sk)
+{
+	struct tcp_fastopen_context *ctx;
+
+	ctx = rcu_dereference_protected(
+			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
+	if (ctx)
+		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
+}
+
 void tcp_fastopen_ctx_destroy(struct net *net)
 {
 	struct tcp_fastopen_context *ctxt;
@@ -55,10 +65,12 @@ void tcp_fastopen_ctx_destroy(struct net *net)
 	call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
 }
 
-int tcp_fastopen_reset_cipher(struct net *net, void *key, unsigned int len)
+int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
+			      void *key, unsigned int len)
 {
-	int err;
 	struct tcp_fastopen_context *ctx, *octx;
+	struct fastopen_queue *q;
+	int err;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -79,27 +91,39 @@ error:		kfree(ctx);
 	}
 	memcpy(ctx->key, key, len);
 
-	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
 
-	octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
-				lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
-	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
+	if (sk) {
+		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+		spin_lock_bh(&q->lock);
+		octx = rcu_dereference_protected(q->ctx,
+						 lockdep_is_held(&q->lock));
+		rcu_assign_pointer(q->ctx, ctx);
+		spin_unlock_bh(&q->lock);
+	} else {
+		spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
+		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
+			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
+		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+		spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
+	}
 
 	if (octx)
 		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
 	return err;
 }
 
-static bool __tcp_fastopen_cookie_gen(struct net *net,
-				      const void *path,
+static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
 				      struct tcp_fastopen_cookie *foc)
 {
 	struct tcp_fastopen_context *ctx;
 	bool ok = false;
 
 	rcu_read_lock();
-	ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+
+	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
+	if (!ctx)
+		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
+
 	if (ctx) {
 		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
@@ -115,7 +139,7 @@ static bool __tcp_fastopen_cookie_gen(struct net *net,
  *
  * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
  */
-static bool tcp_fastopen_cookie_gen(struct net *net,
+static bool tcp_fastopen_cookie_gen(struct sock *sk,
 				    struct request_sock *req,
 				    struct sk_buff *syn,
 				    struct tcp_fastopen_cookie *foc)
@@ -124,7 +148,7 @@ static bool tcp_fastopen_cookie_gen(struct net *net,
 		const struct iphdr *iph = ip_hdr(syn);
 
 		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
-		return __tcp_fastopen_cookie_gen(net, path, foc);
+		return __tcp_fastopen_cookie_gen(sk, path, foc);
 	}
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -132,13 +156,13 @@ static bool tcp_fastopen_cookie_gen(struct net *net,
 		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
 		struct tcp_fastopen_cookie tmp;
 
-		if (__tcp_fastopen_cookie_gen(net, &ip6h->saddr, &tmp)) {
+		if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
 			struct in6_addr *buf = &tmp.addr;
 			int i;
 
 			for (i = 0; i < 4; i++)
 				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
-			return __tcp_fastopen_cookie_gen(net, buf, foc);
+			return __tcp_fastopen_cookie_gen(sk, buf, foc);
 		}
 	}
 #endif
@@ -313,7 +337,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 		goto fastopen;
 
 	if (foc->len >= 0 && /* Client presents or requests a cookie */
-	    tcp_fastopen_cookie_gen(sock_net(sk), req, skb, &valid_foc) &&
+	    tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
 	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
 	    foc->len == valid_foc.len &&
 	    !memcmp(foc->val, valid_foc.val, foc->len)) {
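
A further illustrative sketch: the companion setsockopt/getsockopt plumbing
for TCP_FASTOPEN_KEY lives in net/ipv4/tcp.c and is not shown in this file's
diff, so the assumption here is that getsockopt(TCP_FASTOPEN_KEY) reports the
per-listener key once one has been installed and returns no data before that.
The helper is meant to be called on the listening socket from the earlier
sketch.

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33     /* assumption, as above */
#endif

/* Print the per-listener Fast Open key, if one has been installed. */
static void show_listener_key(int fd)
{
        unsigned char key[16];
        socklen_t len = sizeof(key);

        if (getsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_KEY, key, &len) == 0 &&
            len == sizeof(key)) {
                unsigned int i;

                printf("per-listener TFO key: ");
                for (i = 0; i < len; i++)
                        printf("%02x", key[i]);
                printf("\n");
        } else {
                /* No per-listener key (or the option is not readable here):
                 * cookies are still generated with the global key. */
                printf("listener uses the global TFO key\n");
        }
}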