author     Atul Gupta <atul.gupta@chelsio.com>       2018-03-31 12:11:59 -0400
committer  David S. Miller <davem@davemloft.net>     2018-03-31 23:37:32 -0400
commit     cc35c88ae4db219611e204375d6a4248bc0e84d6 (patch)
tree       195afa606654aefb9a946a69f9471bc74f9cb280
parent     a089439478734a6a0aa2eabbc03113e0c34db282 (diff)
crypto: chtls - CPL handler definition
Exchange messages with the hardware to program the TLS session, and add
CPL handlers for messages received from the chip.
Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Michael Werner <werner@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c  2126
-rw-r--r--  net/ipv4/tcp_minisocks.c                     1
2 files changed, 2127 insertions, 0 deletions
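The functions added below (chtls_pass_open_rpl, chtls_pass_accept_req, chtls_pass_establish, chtls_rx_data, chtls_rx_pdu, chtls_rx_cmp, ...) are the per-opcode entry points for CPL messages coming back from the chip; their registration is not part of this diff. As a rough illustration of the opcode-indexed dispatch pattern such handlers typically plug into, here is a minimal, self-contained user-space sketch. The opcode values, handler names and the chtls_dev layout are simplified assumptions for illustration only, not the driver's actual registration code.

/*
 * Hypothetical, self-contained sketch of opcode-indexed CPL dispatch.
 * Opcode numbers, handler names and struct layouts are stand-ins,
 * not the driver's real definitions.
 */
#include <stdio.h>

#define NUM_CPL_CMDS 0xff

struct sk_buff;                          /* opaque here */
struct chtls_dev { const char *name; };

typedef int (*chtls_handler_func)(struct chtls_dev *cdev, struct sk_buff *skb);

/* Assumed opcode numbers, for illustration only. */
enum { CPL_PASS_OPEN_RPL = 0x2c, CPL_RX_DATA = 0x41 };

static int handle_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	printf("%s: PASS_OPEN_RPL\n", cdev->name);
	return 0;
}

static int handle_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	printf("%s: RX_DATA\n", cdev->name);
	return 0;
}

/* One handler per CPL opcode; unset slots stay NULL. */
static chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL] = handle_pass_open_rpl,
	[CPL_RX_DATA]       = handle_rx_data,
};

/* Dispatch a received CPL message by its opcode. */
static int chtls_uld_rx(struct chtls_dev *cdev, unsigned int opcode,
			struct sk_buff *skb)
{
	if (opcode >= NUM_CPL_CMDS || !chtls_handlers[opcode])
		return -1;               /* unknown message */
	return chtls_handlers[opcode](cdev, skb);
}

int main(void)
{
	struct chtls_dev dev = { .name = "chtls0" };

	chtls_uld_rx(&dev, CPL_PASS_OPEN_RPL, NULL);
	chtls_uld_rx(&dev, CPL_RX_DATA, NULL);
	return 0;
}

Indexing by opcode keeps the receive path trivial and gives each message type its own handler, which is what the functions in this file provide.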
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
new file mode 100644
index 000000000000..82a473a0cefa
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -0,0 +1,2126 @@
1 | /* | ||
2 | * Copyright (c) 2018 Chelsio Communications, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * Written by: Atul Gupta (atul.gupta@chelsio.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <linux/workqueue.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/timer.h> | ||
16 | #include <linux/notifier.h> | ||
17 | #include <linux/inetdevice.h> | ||
18 | #include <linux/ip.h> | ||
19 | #include <linux/tcp.h> | ||
20 | #include <linux/sched/signal.h> | ||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/kprobes.h> | ||
23 | #include <linux/if_vlan.h> | ||
24 | #include <net/tcp.h> | ||
25 | #include <net/dst.h> | ||
26 | |||
27 | #include "chtls.h" | ||
28 | #include "chtls_cm.h" | ||
29 | |||
30 | /* | ||
31 | * State transitions and actions for close. Note that if we are in SYN_SENT | ||
32 | * we remain in that state as we cannot control a connection while it's in | ||
33 | * SYN_SENT; such connections are allowed to establish and are then aborted. | ||
34 | */ | ||
35 | static unsigned char new_state[16] = { | ||
36 | /* current state: new state: action: */ | ||
37 | /* (Invalid) */ TCP_CLOSE, | ||
38 | /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | ||
39 | /* TCP_SYN_SENT */ TCP_SYN_SENT, | ||
40 | /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | ||
41 | /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, | ||
42 | /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, | ||
43 | /* TCP_TIME_WAIT */ TCP_CLOSE, | ||
44 | /* TCP_CLOSE */ TCP_CLOSE, | ||
45 | /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, | ||
46 | /* TCP_LAST_ACK */ TCP_LAST_ACK, | ||
47 | /* TCP_LISTEN */ TCP_CLOSE, | ||
48 | /* TCP_CLOSING */ TCP_CLOSING, | ||
49 | }; | ||
50 | |||
51 | static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev) | ||
52 | { | ||
53 | struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC); | ||
54 | |||
55 | if (!csk) | ||
56 | return NULL; | ||
57 | |||
58 | csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC); | ||
59 | if (!csk->txdata_skb_cache) { | ||
60 | kfree(csk); | ||
61 | return NULL; | ||
62 | } | ||
63 | |||
64 | kref_init(&csk->kref); | ||
65 | csk->cdev = cdev; | ||
66 | skb_queue_head_init(&csk->txq); | ||
67 | csk->wr_skb_head = NULL; | ||
68 | csk->wr_skb_tail = NULL; | ||
69 | csk->mss = MAX_MSS; | ||
70 | csk->tlshws.ofld = 1; | ||
71 | csk->tlshws.txkey = -1; | ||
72 | csk->tlshws.rxkey = -1; | ||
73 | csk->tlshws.mfs = TLS_MFS; | ||
74 | skb_queue_head_init(&csk->tlshws.sk_recv_queue); | ||
75 | return csk; | ||
76 | } | ||
77 | |||
78 | static void chtls_sock_release(struct kref *ref) | ||
79 | { | ||
80 | struct chtls_sock *csk = | ||
81 | container_of(ref, struct chtls_sock, kref); | ||
82 | |||
83 | kfree(csk); | ||
84 | } | ||
85 | |||
86 | static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev, | ||
87 | struct sock *sk) | ||
88 | { | ||
89 | struct net_device *ndev = cdev->ports[0]; | ||
90 | |||
91 | if (likely(!inet_sk(sk)->inet_rcv_saddr)) | ||
92 | return ndev; | ||
93 | |||
94 | ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); | ||
95 | if (!ndev) | ||
96 | return NULL; | ||
97 | |||
98 | if (is_vlan_dev(ndev)) | ||
99 | return vlan_dev_real_dev(ndev); | ||
100 | return ndev; | ||
101 | } | ||
102 | |||
103 | static void assign_rxopt(struct sock *sk, unsigned int opt) | ||
104 | { | ||
105 | const struct chtls_dev *cdev; | ||
106 | struct chtls_sock *csk; | ||
107 | struct tcp_sock *tp; | ||
108 | |||
109 | csk = rcu_dereference_sk_user_data(sk); | ||
110 | tp = tcp_sk(sk); | ||
111 | |||
112 | cdev = csk->cdev; | ||
113 | tp->tcp_header_len = sizeof(struct tcphdr); | ||
114 | tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40; | ||
115 | tp->mss_cache = tp->rx_opt.mss_clamp; | ||
116 | tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt); | ||
117 | tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt); | ||
118 | tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt); | ||
119 | SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt); | ||
120 | if (!tp->rx_opt.wscale_ok) | ||
121 | tp->rx_opt.rcv_wscale = 0; | ||
122 | if (tp->rx_opt.tstamp_ok) { | ||
123 | tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; | ||
124 | tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED; | ||
125 | } else if (csk->opt2 & TSTAMPS_EN_F) { | ||
126 | csk->opt2 &= ~TSTAMPS_EN_F; | ||
127 | csk->mtu_idx = TCPOPT_MSS_G(opt); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | static void chtls_purge_receive_queue(struct sock *sk) | ||
132 | { | ||
133 | struct sk_buff *skb; | ||
134 | |||
135 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { | ||
136 | skb_dst_set(skb, (void *)NULL); | ||
137 | kfree_skb(skb); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | static void chtls_purge_write_queue(struct sock *sk) | ||
142 | { | ||
143 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
144 | struct sk_buff *skb; | ||
145 | |||
146 | while ((skb = __skb_dequeue(&csk->txq))) { | ||
147 | sk->sk_wmem_queued -= skb->truesize; | ||
148 | __kfree_skb(skb); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static void chtls_purge_recv_queue(struct sock *sk) | ||
153 | { | ||
154 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
155 | struct chtls_hws *tlsk = &csk->tlshws; | ||
156 | struct sk_buff *skb; | ||
157 | |||
158 | while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) { | ||
159 | skb_dst_set(skb, NULL); | ||
160 | kfree_skb(skb); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | static void abort_arp_failure(void *handle, struct sk_buff *skb) | ||
165 | { | ||
166 | struct cpl_abort_req *req = cplhdr(skb); | ||
167 | struct chtls_dev *cdev; | ||
168 | |||
169 | cdev = (struct chtls_dev *)handle; | ||
170 | req->cmd = CPL_ABORT_NO_RST; | ||
171 | cxgb4_ofld_send(cdev->lldi->ports[0], skb); | ||
172 | } | ||
173 | |||
174 | static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len) | ||
175 | { | ||
176 | if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) { | ||
177 | __skb_trim(skb, 0); | ||
178 | refcount_add(2, &skb->users); | ||
179 | } else { | ||
180 | skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); | ||
181 | } | ||
182 | return skb; | ||
183 | } | ||
184 | |||
185 | static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb) | ||
186 | { | ||
187 | struct cpl_abort_req *req; | ||
188 | struct chtls_sock *csk; | ||
189 | struct tcp_sock *tp; | ||
190 | |||
191 | csk = rcu_dereference_sk_user_data(sk); | ||
192 | tp = tcp_sk(sk); | ||
193 | |||
194 | if (!skb) | ||
195 | skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req)); | ||
196 | |||
197 | req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req)); | ||
198 | INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid); | ||
199 | skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA); | ||
200 | req->rsvd0 = htonl(tp->snd_nxt); | ||
201 | req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT); | ||
202 | req->cmd = mode; | ||
203 | t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure); | ||
204 | send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST); | ||
205 | } | ||
206 | |||
207 | static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb) | ||
208 | { | ||
209 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
210 | |||
211 | if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) || | ||
212 | !csk->cdev)) { | ||
213 | if (sk->sk_state == TCP_SYN_RECV) | ||
214 | csk_set_flag(csk, CSK_RST_ABORTED); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) { | ||
219 | struct tcp_sock *tp = tcp_sk(sk); | ||
220 | |||
221 | if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0) | ||
222 | WARN_ONCE(1, "send tx flowc error"); | ||
223 | csk_set_flag(csk, CSK_TX_DATA_SENT); | ||
224 | } | ||
225 | |||
226 | csk_set_flag(csk, CSK_ABORT_RPL_PENDING); | ||
227 | chtls_purge_write_queue(sk); | ||
228 | |||
229 | csk_set_flag(csk, CSK_ABORT_SHUTDOWN); | ||
230 | if (sk->sk_state != TCP_SYN_RECV) | ||
231 | chtls_send_abort(sk, mode, skb); | ||
232 | else | ||
233 | goto out; | ||
234 | |||
235 | return; | ||
236 | out: | ||
237 | if (skb) | ||
238 | kfree_skb(skb); | ||
239 | } | ||
240 | |||
241 | static void release_tcp_port(struct sock *sk) | ||
242 | { | ||
243 | if (inet_csk(sk)->icsk_bind_hash) | ||
244 | inet_put_port(sk); | ||
245 | } | ||
246 | |||
247 | static void tcp_uncork(struct sock *sk) | ||
248 | { | ||
249 | struct tcp_sock *tp = tcp_sk(sk); | ||
250 | |||
251 | if (tp->nonagle & TCP_NAGLE_CORK) { | ||
252 | tp->nonagle &= ~TCP_NAGLE_CORK; | ||
253 | chtls_tcp_push(sk, 0); | ||
254 | } | ||
255 | } | ||
256 | |||
257 | static void chtls_close_conn(struct sock *sk) | ||
258 | { | ||
259 | struct cpl_close_con_req *req; | ||
260 | struct chtls_sock *csk; | ||
261 | struct sk_buff *skb; | ||
262 | unsigned int tid; | ||
263 | unsigned int len; | ||
264 | |||
265 | len = roundup(sizeof(struct cpl_close_con_req), 16); | ||
266 | csk = rcu_dereference_sk_user_data(sk); | ||
267 | tid = csk->tid; | ||
268 | |||
269 | skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); | ||
270 | req = (struct cpl_close_con_req *)__skb_put(skb, len); | ||
271 | memset(req, 0, len); | ||
272 | req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | | ||
273 | FW_WR_IMMDLEN_V(sizeof(*req) - | ||
274 | sizeof(req->wr))); | ||
275 | req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) | | ||
276 | FW_WR_FLOWID_V(tid)); | ||
277 | |||
278 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); | ||
279 | |||
280 | tcp_uncork(sk); | ||
281 | skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND); | ||
282 | if (sk->sk_state != TCP_SYN_SENT) | ||
283 | chtls_push_frames(csk, 1); | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * Perform a state transition during close and return the actions indicated | ||
288 | * for the transition. Do not make this function inline; the main reason | ||
289 | * it exists at all is to avoid multiple inlining of tcp_set_state. | ||
290 | */ | ||
291 | static int make_close_transition(struct sock *sk) | ||
292 | { | ||
293 | int next = (int)new_state[sk->sk_state]; | ||
294 | |||
295 | tcp_set_state(sk, next & TCP_STATE_MASK); | ||
296 | return next & TCP_ACTION_FIN; | ||
297 | } | ||
298 | |||
299 | void chtls_close(struct sock *sk, long timeout) | ||
300 | { | ||
301 | int data_lost, prev_state; | ||
302 | struct chtls_sock *csk; | ||
303 | |||
304 | csk = rcu_dereference_sk_user_data(sk); | ||
305 | |||
306 | lock_sock(sk); | ||
307 | sk->sk_shutdown |= SHUTDOWN_MASK; | ||
308 | |||
309 | data_lost = skb_queue_len(&sk->sk_receive_queue); | ||
310 | data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue); | ||
311 | chtls_purge_recv_queue(sk); | ||
312 | chtls_purge_receive_queue(sk); | ||
313 | |||
314 | if (sk->sk_state == TCP_CLOSE) { | ||
315 | goto wait; | ||
316 | } else if (data_lost || sk->sk_state == TCP_SYN_SENT) { | ||
317 | chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL); | ||
318 | release_tcp_port(sk); | ||
319 | goto unlock; | ||
320 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | ||
321 | sk->sk_prot->disconnect(sk, 0); | ||
322 | } else if (make_close_transition(sk)) { | ||
323 | chtls_close_conn(sk); | ||
324 | } | ||
325 | wait: | ||
326 | if (timeout) | ||
327 | sk_stream_wait_close(sk, timeout); | ||
328 | |||
329 | unlock: | ||
330 | prev_state = sk->sk_state; | ||
331 | sock_hold(sk); | ||
332 | sock_orphan(sk); | ||
333 | |||
334 | release_sock(sk); | ||
335 | |||
336 | local_bh_disable(); | ||
337 | bh_lock_sock(sk); | ||
338 | |||
339 | if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | ||
340 | goto out; | ||
341 | |||
342 | if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 && | ||
343 | !csk_flag(sk, CSK_ABORT_SHUTDOWN)) { | ||
344 | struct sk_buff *skb; | ||
345 | |||
346 | skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC); | ||
347 | if (skb) | ||
348 | chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb); | ||
349 | } | ||
350 | |||
351 | if (sk->sk_state == TCP_CLOSE) | ||
352 | inet_csk_destroy_sock(sk); | ||
353 | |||
354 | out: | ||
355 | bh_unlock_sock(sk); | ||
356 | local_bh_enable(); | ||
357 | sock_put(sk); | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * Wait until a socket enters one of the given states. | ||
362 | */ | ||
363 | static int wait_for_states(struct sock *sk, unsigned int states) | ||
364 | { | ||
365 | DECLARE_WAITQUEUE(wait, current); | ||
366 | struct socket_wq _sk_wq; | ||
367 | long current_timeo; | ||
368 | int err = 0; | ||
369 | |||
370 | current_timeo = 200; | ||
371 | |||
372 | /* | ||
373 | * We want this to work even when there's no associated struct socket. | ||
374 | * In that case we provide a temporary wait_queue_head_t. | ||
375 | */ | ||
376 | if (!sk->sk_wq) { | ||
377 | init_waitqueue_head(&_sk_wq.wait); | ||
378 | _sk_wq.fasync_list = NULL; | ||
379 | init_rcu_head_on_stack(&_sk_wq.rcu); | ||
380 | RCU_INIT_POINTER(sk->sk_wq, &_sk_wq); | ||
381 | } | ||
382 | |||
383 | add_wait_queue(sk_sleep(sk), &wait); | ||
384 | while (!sk_in_state(sk, states)) { | ||
385 | if (!current_timeo) { | ||
386 | err = -EBUSY; | ||
387 | break; | ||
388 | } | ||
389 | if (signal_pending(current)) { | ||
390 | err = sock_intr_errno(current_timeo); | ||
391 | break; | ||
392 | } | ||
393 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
394 | release_sock(sk); | ||
395 | if (!sk_in_state(sk, states)) | ||
396 | current_timeo = schedule_timeout(current_timeo); | ||
397 | __set_current_state(TASK_RUNNING); | ||
398 | lock_sock(sk); | ||
399 | } | ||
400 | remove_wait_queue(sk_sleep(sk), &wait); | ||
401 | |||
402 | if (rcu_dereference(sk->sk_wq) == &_sk_wq) | ||
403 | sk->sk_wq = NULL; | ||
404 | return err; | ||
405 | } | ||
406 | |||
407 | int chtls_disconnect(struct sock *sk, int flags) | ||
408 | { | ||
409 | struct chtls_sock *csk; | ||
410 | struct tcp_sock *tp; | ||
411 | int err; | ||
412 | |||
413 | tp = tcp_sk(sk); | ||
414 | csk = rcu_dereference_sk_user_data(sk); | ||
415 | chtls_purge_recv_queue(sk); | ||
416 | chtls_purge_receive_queue(sk); | ||
417 | chtls_purge_write_queue(sk); | ||
418 | |||
419 | if (sk->sk_state != TCP_CLOSE) { | ||
420 | sk->sk_err = ECONNRESET; | ||
421 | chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL); | ||
422 | err = wait_for_states(sk, TCPF_CLOSE); | ||
423 | if (err) | ||
424 | return err; | ||
425 | } | ||
426 | chtls_purge_recv_queue(sk); | ||
427 | chtls_purge_receive_queue(sk); | ||
428 | tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale); | ||
429 | return tcp_disconnect(sk, flags); | ||
430 | } | ||
431 | |||
432 | #define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \ | ||
433 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT) | ||
434 | void chtls_shutdown(struct sock *sk, int how) | ||
435 | { | ||
436 | if ((how & SEND_SHUTDOWN) && | ||
437 | sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) && | ||
438 | make_close_transition(sk)) | ||
439 | chtls_close_conn(sk); | ||
440 | } | ||
441 | |||
442 | void chtls_destroy_sock(struct sock *sk) | ||
443 | { | ||
444 | struct chtls_sock *csk; | ||
445 | |||
446 | csk = rcu_dereference_sk_user_data(sk); | ||
447 | chtls_purge_recv_queue(sk); | ||
448 | csk->ulp_mode = ULP_MODE_NONE; | ||
449 | chtls_purge_write_queue(sk); | ||
450 | free_tls_keyid(sk); | ||
451 | kref_put(&csk->kref, chtls_sock_release); | ||
452 | sk->sk_prot = &tcp_prot; | ||
453 | sk->sk_prot->destroy(sk); | ||
454 | } | ||
455 | |||
456 | static void reset_listen_child(struct sock *child) | ||
457 | { | ||
458 | struct chtls_sock *csk = rcu_dereference_sk_user_data(child); | ||
459 | struct sk_buff *skb; | ||
460 | |||
461 | skb = alloc_ctrl_skb(csk->txdata_skb_cache, | ||
462 | sizeof(struct cpl_abort_req)); | ||
463 | |||
464 | chtls_send_reset(child, CPL_ABORT_SEND_RST, skb); | ||
465 | sock_orphan(child); | ||
466 | INC_ORPHAN_COUNT(child); | ||
467 | if (child->sk_state == TCP_CLOSE) | ||
468 | inet_csk_destroy_sock(child); | ||
469 | } | ||
470 | |||
471 | static void chtls_disconnect_acceptq(struct sock *listen_sk) | ||
472 | { | ||
473 | struct request_sock **pprev; | ||
474 | |||
475 | pprev = ACCEPT_QUEUE(listen_sk); | ||
476 | while (*pprev) { | ||
477 | struct request_sock *req = *pprev; | ||
478 | |||
479 | if (req->rsk_ops == &chtls_rsk_ops) { | ||
480 | struct sock *child = req->sk; | ||
481 | |||
482 | *pprev = req->dl_next; | ||
483 | sk_acceptq_removed(listen_sk); | ||
484 | reqsk_put(req); | ||
485 | sock_hold(child); | ||
486 | local_bh_disable(); | ||
487 | bh_lock_sock(child); | ||
488 | release_tcp_port(child); | ||
489 | reset_listen_child(child); | ||
490 | bh_unlock_sock(child); | ||
491 | local_bh_enable(); | ||
492 | sock_put(child); | ||
493 | } else { | ||
494 | pprev = &req->dl_next; | ||
495 | } | ||
496 | } | ||
497 | } | ||
498 | |||
499 | static int listen_hashfn(const struct sock *sk) | ||
500 | { | ||
501 | return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1); | ||
502 | } | ||
503 | |||
504 | static struct listen_info *listen_hash_add(struct chtls_dev *cdev, | ||
505 | struct sock *sk, | ||
506 | unsigned int stid) | ||
507 | { | ||
508 | struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
509 | |||
510 | if (p) { | ||
511 | int key = listen_hashfn(sk); | ||
512 | |||
513 | p->sk = sk; | ||
514 | p->stid = stid; | ||
515 | spin_lock(&cdev->listen_lock); | ||
516 | p->next = cdev->listen_hash_tab[key]; | ||
517 | cdev->listen_hash_tab[key] = p; | ||
518 | spin_unlock(&cdev->listen_lock); | ||
519 | } | ||
520 | return p; | ||
521 | } | ||
522 | |||
523 | static int listen_hash_find(struct chtls_dev *cdev, | ||
524 | struct sock *sk) | ||
525 | { | ||
526 | struct listen_info *p; | ||
527 | int stid = -1; | ||
528 | int key; | ||
529 | |||
530 | key = listen_hashfn(sk); | ||
531 | |||
532 | spin_lock(&cdev->listen_lock); | ||
533 | for (p = cdev->listen_hash_tab[key]; p; p = p->next) | ||
534 | if (p->sk == sk) { | ||
535 | stid = p->stid; | ||
536 | break; | ||
537 | } | ||
538 | spin_unlock(&cdev->listen_lock); | ||
539 | return stid; | ||
540 | } | ||
541 | |||
542 | static int listen_hash_del(struct chtls_dev *cdev, | ||
543 | struct sock *sk) | ||
544 | { | ||
545 | struct listen_info *p, **prev; | ||
546 | int stid = -1; | ||
547 | int key; | ||
548 | |||
549 | key = listen_hashfn(sk); | ||
550 | prev = &cdev->listen_hash_tab[key]; | ||
551 | |||
552 | spin_lock(&cdev->listen_lock); | ||
553 | for (p = *prev; p; prev = &p->next, p = p->next) | ||
554 | if (p->sk == sk) { | ||
555 | stid = p->stid; | ||
556 | *prev = p->next; | ||
557 | kfree(p); | ||
558 | break; | ||
559 | } | ||
560 | spin_unlock(&cdev->listen_lock); | ||
561 | return stid; | ||
562 | } | ||
563 | |||
564 | static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent) | ||
565 | { | ||
566 | struct request_sock *req; | ||
567 | struct chtls_sock *csk; | ||
568 | |||
569 | csk = rcu_dereference_sk_user_data(child); | ||
570 | req = csk->passive_reap_next; | ||
571 | |||
572 | reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req); | ||
573 | __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq); | ||
574 | chtls_reqsk_free(req); | ||
575 | csk->passive_reap_next = NULL; | ||
576 | } | ||
577 | |||
578 | static void chtls_reset_synq(struct listen_ctx *listen_ctx) | ||
579 | { | ||
580 | struct sock *listen_sk = listen_ctx->lsk; | ||
581 | |||
582 | while (!skb_queue_empty(&listen_ctx->synq)) { | ||
583 | struct chtls_sock *csk = | ||
584 | container_of((struct synq *)__skb_dequeue | ||
585 | (&listen_ctx->synq), struct chtls_sock, synq); | ||
586 | struct sock *child = csk->sk; | ||
587 | |||
588 | cleanup_syn_rcv_conn(child, listen_sk); | ||
589 | sock_hold(child); | ||
590 | local_bh_disable(); | ||
591 | bh_lock_sock(child); | ||
592 | release_tcp_port(child); | ||
593 | reset_listen_child(child); | ||
594 | bh_unlock_sock(child); | ||
595 | local_bh_enable(); | ||
596 | sock_put(child); | ||
597 | } | ||
598 | } | ||
599 | |||
600 | int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk) | ||
601 | { | ||
602 | struct net_device *ndev; | ||
603 | struct listen_ctx *ctx; | ||
604 | struct adapter *adap; | ||
605 | struct port_info *pi; | ||
606 | int stid; | ||
607 | int ret; | ||
608 | |||
609 | if (sk->sk_family != PF_INET) | ||
610 | return -EAGAIN; | ||
611 | |||
612 | rcu_read_lock(); | ||
613 | ndev = chtls_ipv4_netdev(cdev, sk); | ||
614 | rcu_read_unlock(); | ||
615 | if (!ndev) | ||
616 | return -EBADF; | ||
617 | |||
618 | pi = netdev_priv(ndev); | ||
619 | adap = pi->adapter; | ||
620 | if (!(adap->flags & FULL_INIT_DONE)) | ||
621 | return -EBADF; | ||
622 | |||
623 | if (listen_hash_find(cdev, sk) >= 0) /* already have it */ | ||
624 | return -EADDRINUSE; | ||
625 | |||
626 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | ||
627 | if (!ctx) | ||
628 | return -ENOMEM; | ||
629 | |||
630 | __module_get(THIS_MODULE); | ||
631 | ctx->lsk = sk; | ||
632 | ctx->cdev = cdev; | ||
633 | ctx->state = T4_LISTEN_START_PENDING; | ||
634 | skb_queue_head_init(&ctx->synq); | ||
635 | |||
636 | stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx); | ||
637 | if (stid < 0) | ||
638 | goto free_ctx; | ||
639 | |||
640 | sock_hold(sk); | ||
641 | if (!listen_hash_add(cdev, sk, stid)) | ||
642 | goto free_stid; | ||
643 | |||
644 | ret = cxgb4_create_server(ndev, stid, | ||
645 | inet_sk(sk)->inet_rcv_saddr, | ||
646 | inet_sk(sk)->inet_sport, 0, | ||
647 | cdev->lldi->rxq_ids[0]); | ||
648 | if (ret > 0) | ||
649 | ret = net_xmit_errno(ret); | ||
650 | if (ret) | ||
651 | goto del_hash; | ||
652 | return 0; | ||
653 | del_hash: | ||
654 | listen_hash_del(cdev, sk); | ||
655 | free_stid: | ||
656 | cxgb4_free_stid(cdev->tids, stid, sk->sk_family); | ||
657 | sock_put(sk); | ||
658 | free_ctx: | ||
659 | kfree(ctx); | ||
660 | module_put(THIS_MODULE); | ||
661 | return -EBADF; | ||
662 | } | ||
663 | |||
664 | void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk) | ||
665 | { | ||
666 | struct listen_ctx *listen_ctx; | ||
667 | int stid; | ||
668 | |||
669 | stid = listen_hash_del(cdev, sk); | ||
670 | if (stid < 0) | ||
671 | return; | ||
672 | |||
673 | listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid); | ||
674 | chtls_reset_synq(listen_ctx); | ||
675 | |||
676 | cxgb4_remove_server(cdev->lldi->ports[0], stid, | ||
677 | cdev->lldi->rxq_ids[0], 0); | ||
678 | chtls_disconnect_acceptq(sk); | ||
679 | } | ||
680 | |||
681 | static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb) | ||
682 | { | ||
683 | struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR; | ||
684 | unsigned int stid = GET_TID(rpl); | ||
685 | struct listen_ctx *listen_ctx; | ||
686 | |||
687 | listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid); | ||
688 | if (!listen_ctx) | ||
689 | return CPL_RET_BUF_DONE; | ||
690 | |||
691 | if (listen_ctx->state == T4_LISTEN_START_PENDING) { | ||
692 | listen_ctx->state = T4_LISTEN_STARTED; | ||
693 | return CPL_RET_BUF_DONE; | ||
694 | } | ||
695 | |||
696 | if (rpl->status != CPL_ERR_NONE) { | ||
697 | pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n", | ||
698 | rpl->status, stid); | ||
699 | return CPL_RET_BUF_DONE; | ||
700 | } | ||
701 | cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family); | ||
702 | sock_put(listen_ctx->lsk); | ||
703 | kfree(listen_ctx); | ||
704 | module_put(THIS_MODULE); | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) | ||
710 | { | ||
711 | struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR; | ||
712 | struct listen_ctx *listen_ctx; | ||
713 | unsigned int stid; | ||
714 | void *data; | ||
715 | |||
716 | stid = GET_TID(rpl); | ||
717 | data = lookup_stid(cdev->tids, stid); | ||
718 | listen_ctx = (struct listen_ctx *)data; | ||
719 | |||
720 | if (rpl->status != CPL_ERR_NONE) { | ||
721 | pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n", | ||
722 | rpl->status, stid); | ||
723 | return CPL_RET_BUF_DONE; | ||
724 | } | ||
725 | |||
726 | cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family); | ||
727 | sock_put(listen_ctx->lsk); | ||
728 | kfree(listen_ctx); | ||
729 | module_put(THIS_MODULE); | ||
730 | |||
731 | return 0; | ||
732 | } | ||
733 | |||
734 | static void chtls_release_resources(struct sock *sk) | ||
735 | { | ||
736 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
737 | struct chtls_dev *cdev = csk->cdev; | ||
738 | unsigned int tid = csk->tid; | ||
739 | struct tid_info *tids; | ||
740 | |||
741 | if (!cdev) | ||
742 | return; | ||
743 | |||
744 | tids = cdev->tids; | ||
745 | kfree_skb(csk->txdata_skb_cache); | ||
746 | csk->txdata_skb_cache = NULL; | ||
747 | |||
748 | if (csk->l2t_entry) { | ||
749 | cxgb4_l2t_release(csk->l2t_entry); | ||
750 | csk->l2t_entry = NULL; | ||
751 | } | ||
752 | |||
753 | cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family); | ||
754 | sock_put(sk); | ||
755 | } | ||
756 | |||
757 | static void chtls_conn_done(struct sock *sk) | ||
758 | { | ||
759 | if (sock_flag(sk, SOCK_DEAD)) | ||
760 | chtls_purge_receive_queue(sk); | ||
761 | sk_wakeup_sleepers(sk, 0); | ||
762 | tcp_done(sk); | ||
763 | } | ||
764 | |||
765 | static void do_abort_syn_rcv(struct sock *child, struct sock *parent) | ||
766 | { | ||
767 | /* | ||
768 | * If the server is still open, we clean up the child connection; | ||
769 | * otherwise the server already did the cleanup as it was purging | ||
770 | * its SYN queue and the skb was just sitting in its backlog. | ||
771 | */ | ||
772 | if (likely(parent->sk_state == TCP_LISTEN)) { | ||
773 | cleanup_syn_rcv_conn(child, parent); | ||
774 | /* Without the call to sock_orphan below, | ||
775 | * we leak the socket resource with the syn_flood test, | ||
776 | * as inet_csk_destroy_sock will not be called | ||
777 | * from tcp_done since the SOCK_DEAD flag is not set. | ||
778 | * The kernel handles this differently: a new socket is | ||
779 | * created only after the 3-way handshake is done. | ||
780 | */ | ||
781 | sock_orphan(child); | ||
782 | percpu_counter_inc((child)->sk_prot->orphan_count); | ||
783 | chtls_release_resources(child); | ||
784 | chtls_conn_done(child); | ||
785 | } else { | ||
786 | if (csk_flag(child, CSK_RST_ABORTED)) { | ||
787 | chtls_release_resources(child); | ||
788 | chtls_conn_done(child); | ||
789 | } | ||
790 | } | ||
791 | } | ||
792 | |||
793 | static void pass_open_abort(struct sock *child, struct sock *parent, | ||
794 | struct sk_buff *skb) | ||
795 | { | ||
796 | do_abort_syn_rcv(child, parent); | ||
797 | kfree_skb(skb); | ||
798 | } | ||
799 | |||
800 | static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb) | ||
801 | { | ||
802 | pass_open_abort(skb->sk, lsk, skb); | ||
803 | } | ||
804 | |||
805 | static void chtls_pass_open_arp_failure(struct sock *sk, | ||
806 | struct sk_buff *skb) | ||
807 | { | ||
808 | const struct request_sock *oreq; | ||
809 | struct chtls_sock *csk; | ||
810 | struct chtls_dev *cdev; | ||
811 | struct sock *parent; | ||
812 | void *data; | ||
813 | |||
814 | csk = rcu_dereference_sk_user_data(sk); | ||
815 | cdev = csk->cdev; | ||
816 | |||
817 | /* | ||
818 | * If the connection is being aborted due to the parent listening | ||
819 | * socket going away, there's nothing to do; the ABORT_REQ will close | ||
820 | * the connection. | ||
821 | */ | ||
822 | if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) { | ||
823 | kfree_skb(skb); | ||
824 | return; | ||
825 | } | ||
826 | |||
827 | oreq = csk->passive_reap_next; | ||
828 | data = lookup_stid(cdev->tids, oreq->ts_recent); | ||
829 | parent = ((struct listen_ctx *)data)->lsk; | ||
830 | |||
831 | bh_lock_sock(parent); | ||
832 | if (!sock_owned_by_user(parent)) { | ||
833 | pass_open_abort(sk, parent, skb); | ||
834 | } else { | ||
835 | BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort; | ||
836 | __sk_add_backlog(parent, skb); | ||
837 | } | ||
838 | bh_unlock_sock(parent); | ||
839 | } | ||
840 | |||
841 | static void chtls_accept_rpl_arp_failure(void *handle, | ||
842 | struct sk_buff *skb) | ||
843 | { | ||
844 | struct sock *sk = (struct sock *)handle; | ||
845 | |||
846 | sock_hold(sk); | ||
847 | process_cpl_msg(chtls_pass_open_arp_failure, sk, skb); | ||
848 | sock_put(sk); | ||
849 | } | ||
850 | |||
851 | static unsigned int chtls_select_mss(const struct chtls_sock *csk, | ||
852 | unsigned int pmtu, | ||
853 | struct cpl_pass_accept_req *req) | ||
854 | { | ||
855 | struct chtls_dev *cdev; | ||
856 | struct dst_entry *dst; | ||
857 | unsigned int tcpoptsz; | ||
858 | unsigned int iphdrsz; | ||
859 | unsigned int mtu_idx; | ||
860 | struct tcp_sock *tp; | ||
861 | unsigned int mss; | ||
862 | struct sock *sk; | ||
863 | |||
864 | mss = ntohs(req->tcpopt.mss); | ||
865 | sk = csk->sk; | ||
866 | dst = __sk_dst_get(sk); | ||
867 | cdev = csk->cdev; | ||
868 | tp = tcp_sk(sk); | ||
869 | tcpoptsz = 0; | ||
870 | |||
871 | iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr); | ||
872 | if (req->tcpopt.tstamp) | ||
873 | tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4); | ||
874 | |||
875 | tp->advmss = dst_metric_advmss(dst); | ||
876 | if (USER_MSS(tp) && tp->advmss > USER_MSS(tp)) | ||
877 | tp->advmss = USER_MSS(tp); | ||
878 | if (tp->advmss > pmtu - iphdrsz) | ||
879 | tp->advmss = pmtu - iphdrsz; | ||
880 | if (mss && tp->advmss > mss) | ||
881 | tp->advmss = mss; | ||
882 | |||
883 | tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus, | ||
884 | iphdrsz + tcpoptsz, | ||
885 | tp->advmss - tcpoptsz, | ||
886 | 8, &mtu_idx); | ||
887 | tp->advmss -= iphdrsz; | ||
888 | |||
889 | inet_csk(sk)->icsk_pmtu_cookie = pmtu; | ||
890 | return mtu_idx; | ||
891 | } | ||
892 | |||
893 | static unsigned int select_rcv_wnd(struct chtls_sock *csk) | ||
894 | { | ||
895 | unsigned int rcvwnd; | ||
896 | unsigned int wnd; | ||
897 | struct sock *sk; | ||
898 | |||
899 | sk = csk->sk; | ||
900 | wnd = tcp_full_space(sk); | ||
901 | |||
902 | if (wnd < MIN_RCV_WND) | ||
903 | wnd = MIN_RCV_WND; | ||
904 | |||
905 | rcvwnd = MAX_RCV_WND; | ||
906 | |||
907 | csk_set_flag(csk, CSK_UPDATE_RCV_WND); | ||
908 | return min(wnd, rcvwnd); | ||
909 | } | ||
910 | |||
911 | static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp) | ||
912 | { | ||
913 | int wscale = 0; | ||
914 | |||
915 | if (space > MAX_RCV_WND) | ||
916 | space = MAX_RCV_WND; | ||
917 | if (win_clamp && win_clamp < space) | ||
918 | space = win_clamp; | ||
919 | |||
920 | if (wscale_ok) { | ||
921 | while (wscale < 14 && (65535 << wscale) < space) | ||
922 | wscale++; | ||
923 | } | ||
924 | return wscale; | ||
925 | } | ||
926 | |||
927 | static void chtls_pass_accept_rpl(struct sk_buff *skb, | ||
928 | struct cpl_pass_accept_req *req, | ||
929 | unsigned int tid) | ||
930 | |||
931 | { | ||
932 | struct cpl_t5_pass_accept_rpl *rpl5; | ||
933 | struct cxgb4_lld_info *lldi; | ||
934 | const struct tcphdr *tcph; | ||
935 | const struct tcp_sock *tp; | ||
936 | struct chtls_sock *csk; | ||
937 | unsigned int len; | ||
938 | struct sock *sk; | ||
939 | u32 opt2, hlen; | ||
940 | u64 opt0; | ||
941 | |||
942 | sk = skb->sk; | ||
943 | tp = tcp_sk(sk); | ||
944 | csk = sk->sk_user_data; | ||
945 | csk->tid = tid; | ||
946 | lldi = csk->cdev->lldi; | ||
947 | len = roundup(sizeof(*rpl5), 16); | ||
948 | |||
949 | rpl5 = __skb_put_zero(skb, len); | ||
950 | INIT_TP_WR(rpl5, tid); | ||
951 | |||
952 | OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, | ||
953 | csk->tid)); | ||
954 | csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)), | ||
955 | req); | ||
956 | opt0 = TCAM_BYPASS_F | | ||
957 | WND_SCALE_V((tp)->rx_opt.rcv_wscale) | | ||
958 | MSS_IDX_V(csk->mtu_idx) | | ||
959 | L2T_IDX_V(csk->l2t_entry->idx) | | ||
960 | NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) | | ||
961 | TX_CHAN_V(csk->tx_chan) | | ||
962 | SMAC_SEL_V(csk->smac_idx) | | ||
963 | DSCP_V(csk->tos >> 2) | | ||
964 | ULP_MODE_V(ULP_MODE_TLS) | | ||
965 | RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M)); | ||
966 | |||
967 | opt2 = RX_CHANNEL_V(0) | | ||
968 | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); | ||
969 | |||
970 | if (!is_t5(lldi->adapter_type)) | ||
971 | opt2 |= RX_FC_DISABLE_F; | ||
972 | if (req->tcpopt.tstamp) | ||
973 | opt2 |= TSTAMPS_EN_F; | ||
974 | if (req->tcpopt.sack) | ||
975 | opt2 |= SACK_EN_F; | ||
976 | hlen = ntohl(req->hdr_len); | ||
977 | |||
978 | tcph = (struct tcphdr *)((u8 *)(req + 1) + | ||
979 | T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen)); | ||
980 | if (tcph->ece && tcph->cwr) | ||
981 | opt2 |= CCTRL_ECN_V(1); | ||
982 | opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); | ||
983 | opt2 |= T5_ISS_F; | ||
984 | opt2 |= T5_OPT_2_VALID_F; | ||
985 | rpl5->opt0 = cpu_to_be64(opt0); | ||
986 | rpl5->opt2 = cpu_to_be32(opt2); | ||
987 | rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); | ||
988 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); | ||
989 | t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure); | ||
990 | cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry); | ||
991 | } | ||
992 | |||
993 | static void inet_inherit_port(struct inet_hashinfo *hash_info, | ||
994 | struct sock *lsk, struct sock *newsk) | ||
995 | { | ||
996 | local_bh_disable(); | ||
997 | __inet_inherit_port(lsk, newsk); | ||
998 | local_bh_enable(); | ||
999 | } | ||
1000 | |||
1001 | static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb) | ||
1002 | { | ||
1003 | if (skb->protocol) { | ||
1004 | kfree_skb(skb); | ||
1005 | return 0; | ||
1006 | } | ||
1007 | BLOG_SKB_CB(skb)->backlog_rcv(sk, skb); | ||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | static struct sock *chtls_recv_sock(struct sock *lsk, | ||
1012 | struct request_sock *oreq, | ||
1013 | void *network_hdr, | ||
1014 | const struct cpl_pass_accept_req *req, | ||
1015 | struct chtls_dev *cdev) | ||
1016 | { | ||
1017 | const struct tcphdr *tcph; | ||
1018 | struct inet_sock *newinet; | ||
1019 | const struct iphdr *iph; | ||
1020 | struct net_device *ndev; | ||
1021 | struct chtls_sock *csk; | ||
1022 | struct dst_entry *dst; | ||
1023 | struct neighbour *n; | ||
1024 | struct tcp_sock *tp; | ||
1025 | struct sock *newsk; | ||
1026 | u16 port_id; | ||
1027 | int rxq_idx; | ||
1028 | int step; | ||
1029 | |||
1030 | iph = (const struct iphdr *)network_hdr; | ||
1031 | newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb); | ||
1032 | if (!newsk) | ||
1033 | goto free_oreq; | ||
1034 | |||
1035 | dst = inet_csk_route_child_sock(lsk, newsk, oreq); | ||
1036 | if (!dst) | ||
1037 | goto free_sk; | ||
1038 | |||
1039 | tcph = (struct tcphdr *)(iph + 1); | ||
1040 | n = dst_neigh_lookup(dst, &iph->saddr); | ||
1041 | if (!n) | ||
1042 | goto free_sk; | ||
1043 | |||
1044 | ndev = n->dev; | ||
1045 | if (!ndev) | ||
1046 | goto free_dst; | ||
1047 | port_id = cxgb4_port_idx(ndev); | ||
1048 | |||
1049 | csk = chtls_sock_create(cdev); | ||
1050 | if (!csk) | ||
1051 | goto free_dst; | ||
1052 | |||
1053 | csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0); | ||
1054 | if (!csk->l2t_entry) | ||
1055 | goto free_csk; | ||
1056 | |||
1057 | newsk->sk_user_data = csk; | ||
1058 | newsk->sk_backlog_rcv = chtls_backlog_rcv; | ||
1059 | |||
1060 | tp = tcp_sk(newsk); | ||
1061 | newinet = inet_sk(newsk); | ||
1062 | |||
1063 | newinet->inet_daddr = iph->saddr; | ||
1064 | newinet->inet_rcv_saddr = iph->daddr; | ||
1065 | newinet->inet_saddr = iph->daddr; | ||
1066 | |||
1067 | oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid)); | ||
1068 | sk_setup_caps(newsk, dst); | ||
1069 | csk->sk = newsk; | ||
1070 | csk->passive_reap_next = oreq; | ||
1071 | csk->tx_chan = cxgb4_port_chan(ndev); | ||
1072 | csk->port_id = port_id; | ||
1073 | csk->egress_dev = ndev; | ||
1074 | csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); | ||
1075 | csk->ulp_mode = ULP_MODE_TLS; | ||
1076 | step = cdev->lldi->nrxq / cdev->lldi->nchan; | ||
1077 | csk->rss_qid = cdev->lldi->rxq_ids[port_id * step]; | ||
1078 | rxq_idx = port_id * step; | ||
1079 | csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx : | ||
1080 | port_id * step; | ||
1081 | csk->sndbuf = newsk->sk_sndbuf; | ||
1082 | csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type, | ||
1083 | cxgb4_port_viid(ndev)); | ||
1084 | tp->rcv_wnd = select_rcv_wnd(csk); | ||
1085 | RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), | ||
1086 | WSCALE_OK(tp), | ||
1087 | tp->window_clamp); | ||
1088 | neigh_release(n); | ||
1089 | inet_inherit_port(&tcp_hashinfo, lsk, newsk); | ||
1090 | csk_set_flag(csk, CSK_CONN_INLINE); | ||
1091 | bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */ | ||
1092 | |||
1093 | return newsk; | ||
1094 | free_csk: | ||
1095 | chtls_sock_release(&csk->kref); | ||
1096 | free_dst: | ||
1097 | dst_release(dst); | ||
1098 | free_sk: | ||
1099 | inet_csk_prepare_forced_close(newsk); | ||
1100 | tcp_done(newsk); | ||
1101 | free_oreq: | ||
1102 | chtls_reqsk_free(oreq); | ||
1103 | return NULL; | ||
1104 | } | ||
1105 | |||
1106 | /* | ||
1107 | * Populate a TID_RELEASE WR. The skb must already be properly sized. | ||
1108 | */ | ||
1109 | static void mk_tid_release(struct sk_buff *skb, | ||
1110 | unsigned int chan, unsigned int tid) | ||
1111 | { | ||
1112 | struct cpl_tid_release *req; | ||
1113 | unsigned int len; | ||
1114 | |||
1115 | len = roundup(sizeof(struct cpl_tid_release), 16); | ||
1116 | req = (struct cpl_tid_release *)__skb_put(skb, len); | ||
1117 | memset(req, 0, len); | ||
1118 | set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); | ||
1119 | INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid); | ||
1120 | } | ||
1121 | |||
1122 | static int chtls_get_module(struct sock *sk) | ||
1123 | { | ||
1124 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1125 | |||
1126 | if (!try_module_get(icsk->icsk_ulp_ops->owner)) | ||
1127 | return -1; | ||
1128 | |||
1129 | return 0; | ||
1130 | } | ||
1131 | |||
1132 | static void chtls_pass_accept_request(struct sock *sk, | ||
1133 | struct sk_buff *skb) | ||
1134 | { | ||
1135 | struct cpl_t5_pass_accept_rpl *rpl; | ||
1136 | struct cpl_pass_accept_req *req; | ||
1137 | struct listen_ctx *listen_ctx; | ||
1138 | struct request_sock *oreq; | ||
1139 | struct sk_buff *reply_skb; | ||
1140 | struct chtls_sock *csk; | ||
1141 | struct chtls_dev *cdev; | ||
1142 | struct tcphdr *tcph; | ||
1143 | struct sock *newsk; | ||
1144 | struct ethhdr *eh; | ||
1145 | struct iphdr *iph; | ||
1146 | void *network_hdr; | ||
1147 | unsigned int stid; | ||
1148 | unsigned int len; | ||
1149 | unsigned int tid; | ||
1150 | |||
1151 | req = cplhdr(skb) + RSS_HDR; | ||
1152 | tid = GET_TID(req); | ||
1153 | cdev = BLOG_SKB_CB(skb)->cdev; | ||
1154 | newsk = lookup_tid(cdev->tids, tid); | ||
1155 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); | ||
1156 | if (newsk) { | ||
1157 | pr_info("tid (%d) already in use\n", tid); | ||
1158 | return; | ||
1159 | } | ||
1160 | |||
1161 | len = roundup(sizeof(*rpl), 16); | ||
1162 | reply_skb = alloc_skb(len, GFP_ATOMIC); | ||
1163 | if (!reply_skb) { | ||
1164 | cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family); | ||
1165 | kfree_skb(skb); | ||
1166 | return; | ||
1167 | } | ||
1168 | |||
1169 | if (sk->sk_state != TCP_LISTEN) | ||
1170 | goto reject; | ||
1171 | |||
1172 | if (inet_csk_reqsk_queue_is_full(sk)) | ||
1173 | goto reject; | ||
1174 | |||
1175 | if (sk_acceptq_is_full(sk)) | ||
1176 | goto reject; | ||
1177 | |||
1178 | oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true); | ||
1179 | if (!oreq) | ||
1180 | goto reject; | ||
1181 | |||
1182 | oreq->rsk_rcv_wnd = 0; | ||
1183 | oreq->rsk_window_clamp = 0; | ||
1184 | oreq->cookie_ts = 0; | ||
1185 | oreq->mss = 0; | ||
1186 | oreq->ts_recent = 0; | ||
1187 | |||
1188 | eh = (struct ethhdr *)(req + 1); | ||
1189 | iph = (struct iphdr *)(eh + 1); | ||
1190 | if (iph->version != 0x4) | ||
1191 | goto free_oreq; | ||
1192 | |||
1193 | network_hdr = (void *)(eh + 1); | ||
1194 | tcph = (struct tcphdr *)(iph + 1); | ||
1195 | |||
1196 | tcp_rsk(oreq)->tfo_listener = false; | ||
1197 | tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq); | ||
1198 | chtls_set_req_port(oreq, tcph->source, tcph->dest); | ||
1199 | inet_rsk(oreq)->ecn_ok = 0; | ||
1200 | chtls_set_req_addr(oreq, iph->daddr, iph->saddr); | ||
1201 | if (req->tcpopt.wsf <= 14) { | ||
1202 | inet_rsk(oreq)->wscale_ok = 1; | ||
1203 | inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; | ||
1204 | } | ||
1205 | inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if; | ||
1206 | |||
1207 | newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev); | ||
1208 | if (!newsk) | ||
1209 | goto reject; | ||
1210 | |||
1211 | if (chtls_get_module(newsk)) | ||
1212 | goto reject; | ||
1213 | inet_csk_reqsk_queue_added(sk); | ||
1214 | reply_skb->sk = newsk; | ||
1215 | chtls_install_cpl_ops(newsk); | ||
1216 | cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family); | ||
1217 | csk = rcu_dereference_sk_user_data(newsk); | ||
1218 | listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid); | ||
1219 | csk->listen_ctx = listen_ctx; | ||
1220 | __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq); | ||
1221 | chtls_pass_accept_rpl(reply_skb, req, tid); | ||
1222 | kfree_skb(skb); | ||
1223 | return; | ||
1224 | |||
1225 | free_oreq: | ||
1226 | chtls_reqsk_free(oreq); | ||
1227 | reject: | ||
1228 | mk_tid_release(reply_skb, 0, tid); | ||
1229 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); | ||
1230 | kfree_skb(skb); | ||
1231 | } | ||
1232 | |||
1233 | /* | ||
1234 | * Handle a CPL_PASS_ACCEPT_REQ message. | ||
1235 | */ | ||
1236 | static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1237 | { | ||
1238 | struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR; | ||
1239 | struct listen_ctx *ctx; | ||
1240 | unsigned int stid; | ||
1241 | unsigned int tid; | ||
1242 | struct sock *lsk; | ||
1243 | void *data; | ||
1244 | |||
1245 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); | ||
1246 | tid = GET_TID(req); | ||
1247 | |||
1248 | data = lookup_stid(cdev->tids, stid); | ||
1249 | if (!data) | ||
1250 | return 1; | ||
1251 | |||
1252 | ctx = (struct listen_ctx *)data; | ||
1253 | lsk = ctx->lsk; | ||
1254 | |||
1255 | if (unlikely(tid >= cdev->tids->ntids)) { | ||
1256 | pr_info("passive open TID %u too large\n", tid); | ||
1257 | return 1; | ||
1258 | } | ||
1259 | |||
1260 | BLOG_SKB_CB(skb)->cdev = cdev; | ||
1261 | process_cpl_msg(chtls_pass_accept_request, lsk, skb); | ||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1265 | /* | ||
1266 | * Completes some final bits of initialization for just established connections | ||
1267 | * and changes their state to TCP_ESTABLISHED. | ||
1268 | * | ||
1269 | * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1. | ||
1270 | */ | ||
1271 | static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt) | ||
1272 | { | ||
1273 | struct tcp_sock *tp = tcp_sk(sk); | ||
1274 | |||
1275 | tp->pushed_seq = snd_isn; | ||
1276 | tp->write_seq = snd_isn; | ||
1277 | tp->snd_nxt = snd_isn; | ||
1278 | tp->snd_una = snd_isn; | ||
1279 | inet_sk(sk)->inet_id = tp->write_seq ^ jiffies; | ||
1280 | assign_rxopt(sk, opt); | ||
1281 | |||
1282 | if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10)) | ||
1283 | tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10); | ||
1284 | |||
1285 | smp_mb(); | ||
1286 | tcp_set_state(sk, TCP_ESTABLISHED); | ||
1287 | } | ||
1288 | |||
1289 | static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb) | ||
1290 | { | ||
1291 | struct sk_buff *abort_skb; | ||
1292 | |||
1293 | abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC); | ||
1294 | if (abort_skb) | ||
1295 | chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb); | ||
1296 | } | ||
1297 | |||
1298 | static struct sock *reap_list; | ||
1299 | static DEFINE_SPINLOCK(reap_list_lock); | ||
1300 | |||
1301 | /* | ||
1302 | * Process the reap list. | ||
1303 | */ | ||
1304 | DECLARE_TASK_FUNC(process_reap_list, task_param) | ||
1305 | { | ||
1306 | spin_lock_bh(&reap_list_lock); | ||
1307 | while (reap_list) { | ||
1308 | struct sock *sk = reap_list; | ||
1309 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
1310 | |||
1311 | reap_list = csk->passive_reap_next; | ||
1312 | csk->passive_reap_next = NULL; | ||
1313 | spin_unlock(&reap_list_lock); | ||
1314 | sock_hold(sk); | ||
1315 | |||
1316 | bh_lock_sock(sk); | ||
1317 | chtls_abort_conn(sk, NULL); | ||
1318 | sock_orphan(sk); | ||
1319 | if (sk->sk_state == TCP_CLOSE) | ||
1320 | inet_csk_destroy_sock(sk); | ||
1321 | bh_unlock_sock(sk); | ||
1322 | sock_put(sk); | ||
1323 | spin_lock(&reap_list_lock); | ||
1324 | } | ||
1325 | spin_unlock_bh(&reap_list_lock); | ||
1326 | } | ||
1327 | |||
1328 | static DECLARE_WORK(reap_task, process_reap_list); | ||
1329 | |||
1330 | static void add_to_reap_list(struct sock *sk) | ||
1331 | { | ||
1332 | struct chtls_sock *csk = sk->sk_user_data; | ||
1333 | |||
1334 | local_bh_disable(); | ||
1335 | bh_lock_sock(sk); | ||
1336 | release_tcp_port(sk); /* release the port immediately */ | ||
1337 | |||
1338 | spin_lock(&reap_list_lock); | ||
1339 | csk->passive_reap_next = reap_list; | ||
1340 | reap_list = sk; | ||
1341 | if (!csk->passive_reap_next) | ||
1342 | schedule_work(&reap_task); | ||
1343 | spin_unlock(&reap_list_lock); | ||
1344 | bh_unlock_sock(sk); | ||
1345 | local_bh_enable(); | ||
1346 | } | ||
1347 | |||
1348 | static void add_pass_open_to_parent(struct sock *child, struct sock *lsk, | ||
1349 | struct chtls_dev *cdev) | ||
1350 | { | ||
1351 | struct request_sock *oreq; | ||
1352 | struct chtls_sock *csk; | ||
1353 | |||
1354 | if (lsk->sk_state != TCP_LISTEN) | ||
1355 | return; | ||
1356 | |||
1357 | csk = child->sk_user_data; | ||
1358 | oreq = csk->passive_reap_next; | ||
1359 | csk->passive_reap_next = NULL; | ||
1360 | |||
1361 | reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq); | ||
1362 | __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq); | ||
1363 | |||
1364 | if (sk_acceptq_is_full(lsk)) { | ||
1365 | chtls_reqsk_free(oreq); | ||
1366 | add_to_reap_list(child); | ||
1367 | } else { | ||
1368 | refcount_set(&oreq->rsk_refcnt, 1); | ||
1369 | inet_csk_reqsk_queue_add(lsk, oreq, child); | ||
1370 | lsk->sk_data_ready(lsk); | ||
1371 | } | ||
1372 | } | ||
1373 | |||
1374 | static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb) | ||
1375 | { | ||
1376 | struct sock *child = skb->sk; | ||
1377 | |||
1378 | skb->sk = NULL; | ||
1379 | add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev); | ||
1380 | kfree_skb(skb); | ||
1381 | } | ||
1382 | |||
1383 | static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1384 | { | ||
1385 | struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR; | ||
1386 | struct chtls_sock *csk; | ||
1387 | struct sock *lsk, *sk; | ||
1388 | unsigned int hwtid; | ||
1389 | |||
1390 | hwtid = GET_TID(req); | ||
1391 | sk = lookup_tid(cdev->tids, hwtid); | ||
1392 | if (!sk) | ||
1393 | return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE); | ||
1394 | |||
1395 | bh_lock_sock(sk); | ||
1396 | if (unlikely(sock_owned_by_user(sk))) { | ||
1397 | kfree_skb(skb); | ||
1398 | } else { | ||
1399 | unsigned int stid; | ||
1400 | void *data; | ||
1401 | |||
1402 | csk = sk->sk_user_data; | ||
1403 | csk->wr_max_credits = 64; | ||
1404 | csk->wr_credits = 64; | ||
1405 | csk->wr_unacked = 0; | ||
1406 | make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); | ||
1407 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); | ||
1408 | sk->sk_state_change(sk); | ||
1409 | if (unlikely(sk->sk_socket)) | ||
1410 | sk_wake_async(sk, 0, POLL_OUT); | ||
1411 | |||
1412 | data = lookup_stid(cdev->tids, stid); | ||
1413 | lsk = ((struct listen_ctx *)data)->lsk; | ||
1414 | |||
1415 | bh_lock_sock(lsk); | ||
1416 | if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) { | ||
1417 | /* removed from synq */ | ||
1418 | bh_unlock_sock(lsk); | ||
1419 | kfree_skb(skb); | ||
1420 | goto unlock; | ||
1421 | } | ||
1422 | |||
1423 | if (likely(!sock_owned_by_user(lsk))) { | ||
1424 | kfree_skb(skb); | ||
1425 | add_pass_open_to_parent(sk, lsk, cdev); | ||
1426 | } else { | ||
1427 | skb->sk = sk; | ||
1428 | BLOG_SKB_CB(skb)->cdev = cdev; | ||
1429 | BLOG_SKB_CB(skb)->backlog_rcv = | ||
1430 | bl_add_pass_open_to_parent; | ||
1431 | __sk_add_backlog(lsk, skb); | ||
1432 | } | ||
1433 | bh_unlock_sock(lsk); | ||
1434 | } | ||
1435 | unlock: | ||
1436 | bh_unlock_sock(sk); | ||
1437 | return 0; | ||
1438 | } | ||
1439 | |||
1440 | /* | ||
1441 | * Handle receipt of an urgent pointer. | ||
1442 | */ | ||
1443 | static void handle_urg_ptr(struct sock *sk, u32 urg_seq) | ||
1444 | { | ||
1445 | struct tcp_sock *tp = tcp_sk(sk); | ||
1446 | |||
1447 | urg_seq--; | ||
1448 | if (tp->urg_data && !after(urg_seq, tp->urg_seq)) | ||
1449 | return; /* duplicate pointer */ | ||
1450 | |||
1451 | sk_send_sigurg(sk); | ||
1452 | if (tp->urg_seq == tp->copied_seq && tp->urg_data && | ||
1453 | !sock_flag(sk, SOCK_URGINLINE) && | ||
1454 | tp->copied_seq != tp->rcv_nxt) { | ||
1455 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); | ||
1456 | |||
1457 | tp->copied_seq++; | ||
1458 | if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len) | ||
1459 | chtls_free_skb(sk, skb); | ||
1460 | } | ||
1461 | |||
1462 | tp->urg_data = TCP_URG_NOTYET; | ||
1463 | tp->urg_seq = urg_seq; | ||
1464 | } | ||
1465 | |||
1466 | static void check_sk_callbacks(struct chtls_sock *csk) | ||
1467 | { | ||
1468 | struct sock *sk = csk->sk; | ||
1469 | |||
1470 | if (unlikely(sk->sk_user_data && | ||
1471 | !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD))) | ||
1472 | csk_set_flag(csk, CSK_CALLBACKS_CHKD); | ||
1473 | } | ||
1474 | |||
1475 | /* | ||
1476 | * Handles Rx data that arrives in a state where the socket isn't accepting | ||
1477 | * new data. | ||
1478 | */ | ||
1479 | static void handle_excess_rx(struct sock *sk, struct sk_buff *skb) | ||
1480 | { | ||
1481 | if (!csk_flag(sk, CSK_ABORT_SHUTDOWN)) | ||
1482 | chtls_abort_conn(sk, skb); | ||
1483 | |||
1484 | kfree_skb(skb); | ||
1485 | } | ||
1486 | |||
1487 | static void chtls_recv_data(struct sock *sk, struct sk_buff *skb) | ||
1488 | { | ||
1489 | struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR; | ||
1490 | struct chtls_sock *csk; | ||
1491 | struct tcp_sock *tp; | ||
1492 | |||
1493 | csk = rcu_dereference_sk_user_data(sk); | ||
1494 | tp = tcp_sk(sk); | ||
1495 | |||
1496 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { | ||
1497 | handle_excess_rx(sk, skb); | ||
1498 | return; | ||
1499 | } | ||
1500 | |||
1501 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); | ||
1502 | ULP_SKB_CB(skb)->psh = hdr->psh; | ||
1503 | skb_ulp_mode(skb) = ULP_MODE_NONE; | ||
1504 | |||
1505 | skb_reset_transport_header(skb); | ||
1506 | __skb_pull(skb, sizeof(*hdr) + RSS_HDR); | ||
1507 | if (!skb->data_len) | ||
1508 | __skb_trim(skb, ntohs(hdr->len)); | ||
1509 | |||
1510 | if (unlikely(hdr->urg)) | ||
1511 | handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg)); | ||
1512 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && | ||
1513 | tp->urg_seq - tp->rcv_nxt < skb->len)) | ||
1514 | tp->urg_data = TCP_URG_VALID | | ||
1515 | skb->data[tp->urg_seq - tp->rcv_nxt]; | ||
1516 | |||
1517 | if (unlikely(hdr->dack_mode != csk->delack_mode)) { | ||
1518 | csk->delack_mode = hdr->dack_mode; | ||
1519 | csk->delack_seq = tp->rcv_nxt; | ||
1520 | } | ||
1521 | |||
1522 | tcp_hdr(skb)->fin = 0; | ||
1523 | tp->rcv_nxt += skb->len; | ||
1524 | |||
1525 | __skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1526 | |||
1527 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1528 | check_sk_callbacks(csk); | ||
1529 | sk->sk_data_ready(sk); | ||
1530 | } | ||
1531 | } | ||
1532 | |||
1533 | static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1534 | { | ||
1535 | struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR; | ||
1536 | unsigned int hwtid = GET_TID(req); | ||
1537 | struct sock *sk; | ||
1538 | |||
1539 | sk = lookup_tid(cdev->tids, hwtid); | ||
1540 | skb_dst_set(skb, NULL); | ||
1541 | process_cpl_msg(chtls_recv_data, sk, skb); | ||
1542 | return 0; | ||
1543 | } | ||
1544 | |||
1545 | static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb) | ||
1546 | { | ||
1547 | struct cpl_tls_data *hdr = cplhdr(skb); | ||
1548 | struct chtls_sock *csk; | ||
1549 | struct chtls_hws *tlsk; | ||
1550 | struct tcp_sock *tp; | ||
1551 | |||
1552 | csk = rcu_dereference_sk_user_data(sk); | ||
1553 | tlsk = &csk->tlshws; | ||
1554 | tp = tcp_sk(sk); | ||
1555 | |||
1556 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { | ||
1557 | handle_excess_rx(sk, skb); | ||
1558 | return; | ||
1559 | } | ||
1560 | |||
1561 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); | ||
1562 | ULP_SKB_CB(skb)->flags = 0; | ||
1563 | skb_ulp_mode(skb) = ULP_MODE_TLS; | ||
1564 | |||
1565 | skb_reset_transport_header(skb); | ||
1566 | __skb_pull(skb, sizeof(*hdr)); | ||
1567 | if (!skb->data_len) | ||
1568 | __skb_trim(skb, | ||
1569 | CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd))); | ||
1570 | |||
1571 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq - | ||
1572 | tp->rcv_nxt < skb->len)) | ||
1573 | tp->urg_data = TCP_URG_VALID | | ||
1574 | skb->data[tp->urg_seq - tp->rcv_nxt]; | ||
1575 | |||
1576 | tcp_hdr(skb)->fin = 0; | ||
1577 | tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)); | ||
1578 | __skb_queue_tail(&tlsk->sk_recv_queue, skb); | ||
1579 | } | ||
1580 | |||
1581 | static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1582 | { | ||
1583 | struct cpl_tls_data *req = cplhdr(skb); | ||
1584 | unsigned int hwtid = GET_TID(req); | ||
1585 | struct sock *sk; | ||
1586 | |||
1587 | sk = lookup_tid(cdev->tids, hwtid); | ||
1588 | skb_dst_set(skb, NULL); | ||
1589 | process_cpl_msg(chtls_recv_pdu, sk, skb); | ||
1590 | return 0; | ||
1591 | } | ||
1592 | |||
1593 | static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen) | ||
1594 | { | ||
1595 | struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb); | ||
1596 | |||
1597 | skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length); | ||
1598 | tls_cmp_hdr->length = ntohs((__force __be16)nlen); | ||
1599 | } | ||
1600 | |||
1601 | static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) | ||
1602 | { | ||
1603 | struct cpl_rx_tls_cmp *cmp_cpl = cplhdr(skb); | ||
1604 | struct sk_buff *skb_rec; | ||
1605 | struct chtls_sock *csk; | ||
1606 | struct chtls_hws *tlsk; | ||
1607 | struct tcp_sock *tp; | ||
1608 | |||
1609 | csk = rcu_dereference_sk_user_data(sk); | ||
1610 | tlsk = &csk->tlshws; | ||
1611 | tp = tcp_sk(sk); | ||
1612 | |||
1613 | ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq); | ||
1614 | ULP_SKB_CB(skb)->flags = 0; | ||
1615 | |||
1616 | skb_reset_transport_header(skb); | ||
1617 | __skb_pull(skb, sizeof(*cmp_cpl)); | ||
1618 | if (!skb->data_len) | ||
1619 | __skb_trim(skb, CPL_RX_TLS_CMP_LENGTH_G | ||
1620 | (ntohl(cmp_cpl->pdulength_length))); | ||
1621 | |||
1622 | tp->rcv_nxt += | ||
1623 | CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length)); | ||
1624 | |||
1625 | skb_rec = __skb_dequeue(&tlsk->sk_recv_queue); | ||
1626 | if (!skb_rec) { | ||
1627 | ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_ND; | ||
1628 | __skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1629 | } else { | ||
1630 | chtls_set_hdrlen(skb, tlsk->pldlen); | ||
1631 | tlsk->pldlen = 0; | ||
1632 | __skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1633 | __skb_queue_tail(&sk->sk_receive_queue, skb_rec); | ||
1634 | } | ||
1635 | |||
1636 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1637 | check_sk_callbacks(csk); | ||
1638 | sk->sk_data_ready(sk); | ||
1639 | } | ||
1640 | } | ||
1641 | |||
1642 | static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1643 | { | ||
1644 | struct cpl_rx_tls_cmp *req = cplhdr(skb); | ||
1645 | unsigned int hwtid = GET_TID(req); | ||
1646 | struct sock *sk; | ||
1647 | |||
1648 | sk = lookup_tid(cdev->tids, hwtid); | ||
1649 | skb_dst_set(skb, NULL); | ||
1650 | process_cpl_msg(chtls_rx_hdr, sk, skb); | ||
1651 | |||
1652 | return 0; | ||
1653 | } | ||
1654 | |||
1655 | static void chtls_timewait(struct sock *sk) | ||
1656 | { | ||
1657 | struct tcp_sock *tp = tcp_sk(sk); | ||
1658 | |||
1659 | tp->rcv_nxt++; | ||
1660 | tp->rx_opt.ts_recent_stamp = get_seconds(); | ||
1661 | tp->srtt_us = 0; | ||
1662 | tcp_time_wait(sk, TCP_TIME_WAIT, 0); | ||
1663 | } | ||
1664 | |||
1665 | static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) | ||
1666 | { | ||
1667 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
1668 | |||
1669 | sk->sk_shutdown |= RCV_SHUTDOWN; | ||
1670 | sock_set_flag(sk, SOCK_DONE); | ||
1671 | |||
1672 | switch (sk->sk_state) { | ||
1673 | case TCP_SYN_RECV: | ||
1674 | case TCP_ESTABLISHED: | ||
1675 | tcp_set_state(sk, TCP_CLOSE_WAIT); | ||
1676 | break; | ||
1677 | case TCP_FIN_WAIT1: | ||
1678 | tcp_set_state(sk, TCP_CLOSING); | ||
1679 | break; | ||
1680 | case TCP_FIN_WAIT2: | ||
1681 | chtls_release_resources(sk); | ||
1682 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) | ||
1683 | chtls_conn_done(sk); | ||
1684 | else | ||
1685 | chtls_timewait(sk); | ||
1686 | break; | ||
1687 | default: | ||
1688 | pr_info("cpl_peer_close in bad state %d\n", sk->sk_state); | ||
1689 | } | ||
1690 | |||
1691 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1692 | sk->sk_state_change(sk); | ||
1693 | /* Do not send POLL_HUP for half duplex close. */ | ||
1694 | |||
1695 | if ((sk->sk_shutdown & SEND_SHUTDOWN) || | ||
1696 | sk->sk_state == TCP_CLOSE) | ||
1697 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); | ||
1698 | else | ||
1699 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | ||
1700 | } | ||
1701 | } | ||
1702 | |||
1703 | static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb) | ||
1704 | { | ||
1705 | struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR; | ||
1706 | struct chtls_sock *csk; | ||
1707 | struct tcp_sock *tp; | ||
1708 | |||
1709 | csk = rcu_dereference_sk_user_data(sk); | ||
1710 | tp = tcp_sk(sk); | ||
1711 | |||
1712 | tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */ | ||
1713 | |||
1714 | switch (sk->sk_state) { | ||
1715 | case TCP_CLOSING: | ||
1716 | chtls_release_resources(sk); | ||
1717 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) | ||
1718 | chtls_conn_done(sk); | ||
1719 | else | ||
1720 | chtls_timewait(sk); | ||
1721 | break; | ||
1722 | case TCP_LAST_ACK: | ||
1723 | chtls_release_resources(sk); | ||
1724 | chtls_conn_done(sk); | ||
1725 | break; | ||
1726 | case TCP_FIN_WAIT1: | ||
1727 | tcp_set_state(sk, TCP_FIN_WAIT2); | ||
1728 | sk->sk_shutdown |= SEND_SHUTDOWN; | ||
1729 | |||
1730 | if (!sock_flag(sk, SOCK_DEAD)) | ||
1731 | sk->sk_state_change(sk); | ||
1732 | else if (tcp_sk(sk)->linger2 < 0 && | ||
1733 | !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN)) | ||
1734 | chtls_abort_conn(sk, skb); | ||
1735 | break; | ||
1736 | default: | ||
1737 | pr_info("close_con_rpl in bad state %d\n", sk->sk_state); | ||
1738 | } | ||
1739 | kfree_skb(skb); | ||
1740 | } | ||
1741 | |||
1742 | static struct sk_buff *get_cpl_skb(struct sk_buff *skb, | ||
1743 | size_t len, gfp_t gfp) | ||
1744 | { | ||
1745 | if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) { | ||
1746 | WARN_ONCE(skb->len < len, "skb alloc error"); | ||
1747 | __skb_trim(skb, len); | ||
1748 | skb_get(skb); | ||
1749 | } else { | ||
1750 | skb = alloc_skb(len, gfp); | ||
1751 | if (skb) | ||
1752 | __skb_put(skb, len); | ||
1753 | } | ||
1754 | return skb; | ||
1755 | } | ||
1756 | |||
1757 | static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid, | ||
1758 | int cmd) | ||
1759 | { | ||
1760 | struct cpl_abort_rpl *rpl = cplhdr(skb); | ||
1761 | |||
1762 | INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid); | ||
1763 | rpl->cmd = cmd; | ||
1764 | } | ||
1765 | |||
1766 | static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1767 | { | ||
1768 | struct cpl_abort_req_rss *req = cplhdr(skb); | ||
1769 | struct sk_buff *reply_skb; | ||
1770 | |||
1771 | reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), | ||
1772 | GFP_KERNEL | __GFP_NOFAIL); | ||
1773 | __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); | ||
1774 | set_abort_rpl_wr(reply_skb, GET_TID(req), | ||
1775 | (req->status & CPL_ABORT_NO_RST)); | ||
1776 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1); | ||
1777 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); | ||
1778 | kfree_skb(skb); | ||
1779 | } | ||
1780 | |||
1781 | static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, | ||
1782 | struct chtls_dev *cdev, int status, int queue) | ||
1783 | { | ||
1784 | struct cpl_abort_req_rss *req = cplhdr(skb); | ||
1785 | struct sk_buff *reply_skb; | ||
1786 | struct chtls_sock *csk; | ||
1787 | |||
1788 | csk = rcu_dereference_sk_user_data(sk); | ||
1789 | |||
1790 | reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), | ||
1791 | GFP_KERNEL); | ||
1792 | |||
1793 | if (!reply_skb) { | ||
1794 | req->status = (queue << 1); | ||
1795 | send_defer_abort_rpl(cdev, skb); | ||
1796 | return; | ||
1797 | } | ||
1798 | |||
1799 | set_abort_rpl_wr(reply_skb, GET_TID(req), status); | ||
1800 | kfree_skb(skb); | ||
1801 | |||
1802 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); | ||
1803 | if (csk_conn_inline(csk)) { | ||
1804 | struct l2t_entry *e = csk->l2t_entry; | ||
1805 | |||
1806 | if (e && sk->sk_state != TCP_SYN_RECV) { | ||
1807 | cxgb4_l2t_send(csk->egress_dev, reply_skb, e); | ||
1808 | return; | ||
1809 | } | ||
1810 | } | ||
1811 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); | ||
1812 | } | ||
1813 | |||
1814 | /* | ||
1815 | * Add an skb to the deferred skb queue for processing from process context. | ||
1816 | */ | ||
1817 | static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, | ||
1818 | defer_handler_t handler) | ||
1819 | { | ||
1820 | DEFERRED_SKB_CB(skb)->handler = handler; | ||
1821 | spin_lock_bh(&cdev->deferq.lock); | ||
1822 | __skb_queue_tail(&cdev->deferq, skb); | ||
1823 | if (skb_queue_len(&cdev->deferq) == 1) | ||
1824 | schedule_work(&cdev->deferq_task); | ||
1825 | spin_unlock_bh(&cdev->deferq.lock); | ||
1826 | } | ||
1827 | |||
1828 | static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, | ||
1829 | struct chtls_dev *cdev, | ||
1830 | int status, int queue) | ||
1831 | { | ||
1832 | struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR; | ||
1833 | struct sk_buff *reply_skb; | ||
1834 | struct chtls_sock *csk; | ||
1835 | unsigned int tid; | ||
1836 | |||
1837 | csk = rcu_dereference_sk_user_data(sk); | ||
1838 | tid = GET_TID(req); | ||
1839 | |||
1840 | reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any()); | ||
1841 | if (!reply_skb) { | ||
1842 | req->status = (queue << 1) | status; | ||
1843 | t4_defer_reply(skb, cdev, send_defer_abort_rpl); | ||
1844 | return; | ||
1845 | } | ||
1846 | |||
1847 | set_abort_rpl_wr(reply_skb, tid, status); | ||
1848 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); | ||
1849 | if (csk_conn_inline(csk)) { | ||
1850 | struct l2t_entry *e = csk->l2t_entry; | ||
1851 | |||
1852 | if (e && sk->sk_state != TCP_SYN_RECV) { | ||
1853 | cxgb4_l2t_send(csk->egress_dev, reply_skb, e); | ||
1854 | return; | ||
1855 | } | ||
1856 | } | ||
1857 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); | ||
1858 | kfree_skb(skb); | ||
1859 | } | ||
1860 | |||
1861 | /* | ||
1862 | * This is run from a listener's backlog to abort a child connection in | ||
1863 | * SYN_RCV state (i.e., one on the listener's SYN queue). | ||
1864 | */ | ||
1865 | static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) | ||
1866 | { | ||
1867 | struct chtls_sock *csk; | ||
1868 | struct sock *child; | ||
1869 | int queue; | ||
1870 | |||
1871 | child = skb->sk; | ||
1872 | csk = rcu_dereference_sk_user_data(child); | ||
1873 | queue = csk->txq_idx; | ||
1874 | |||
1875 | skb->sk = NULL; | ||
1876 | do_abort_syn_rcv(child, lsk); | ||
1877 | send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, | ||
1878 | CPL_ABORT_NO_RST, queue); | ||
1879 | } | ||
1880 | |||
1881 | static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) | ||
1882 | { | ||
1883 | const struct request_sock *oreq; | ||
1884 | struct listen_ctx *listen_ctx; | ||
1885 | struct chtls_sock *csk; | ||
1886 | struct chtls_dev *cdev; | ||
1887 | struct sock *psk; | ||
1888 | void *ctx; | ||
1889 | |||
1890 | csk = sk->sk_user_data; | ||
1891 | oreq = csk->passive_reap_next; | ||
1892 | cdev = csk->cdev; | ||
1893 | |||
1894 | if (!oreq) | ||
1895 | return -1; | ||
1896 | |||
1897 | ctx = lookup_stid(cdev->tids, oreq->ts_recent); | ||
1898 | if (!ctx) | ||
1899 | return -1; | ||
1900 | |||
1901 | listen_ctx = (struct listen_ctx *)ctx; | ||
1902 | psk = listen_ctx->lsk; | ||
1903 | |||
1904 | bh_lock_sock(psk); | ||
1905 | if (!sock_owned_by_user(psk)) { | ||
1906 | int queue = csk->txq_idx; | ||
1907 | |||
1908 | do_abort_syn_rcv(sk, psk); | ||
1909 | send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); | ||
1910 | } else { | ||
1911 | skb->sk = sk; | ||
1912 | BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; | ||
1913 | __sk_add_backlog(psk, skb); | ||
1914 | } | ||
1915 | bh_unlock_sock(psk); | ||
1916 | return 0; | ||
1917 | } | ||
1918 | |||
1919 | static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) | ||
1920 | { | ||
1921 | const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR; | ||
1922 | struct chtls_sock *csk = sk->sk_user_data; | ||
1923 | int rst_status = CPL_ABORT_NO_RST; | ||
1924 | int queue = csk->txq_idx; | ||
1925 | |||
1926 | if (is_neg_adv(req->status)) { | ||
1927 | if (sk->sk_state == TCP_SYN_RECV) | ||
1928 | chtls_set_tcb_tflag(sk, 0, 0); | ||
1929 | |||
1930 | kfree_skb(skb); | ||
1931 | return; | ||
1932 | } | ||
1933 | |||
1934 | csk_reset_flag(csk, CSK_ABORT_REQ_RCVD); | ||
1935 | |||
1936 | if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) && | ||
1937 | !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) { | ||
1938 | struct tcp_sock *tp = tcp_sk(sk); | ||
1939 | |||
1940 | if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0) | ||
1941 | WARN_ONCE(1, "send_tx_flowc error"); | ||
1942 | csk_set_flag(csk, CSK_TX_DATA_SENT); | ||
1943 | } | ||
1944 | |||
1945 | csk_set_flag(csk, CSK_ABORT_SHUTDOWN); | ||
1946 | |||
1947 | if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) { | ||
1948 | sk->sk_err = ETIMEDOUT; | ||
1949 | |||
1950 | if (!sock_flag(sk, SOCK_DEAD)) | ||
1951 | sk->sk_error_report(sk); | ||
1952 | |||
1953 | if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) | ||
1954 | return; | ||
1955 | |||
1956 | chtls_release_resources(sk); | ||
1957 | chtls_conn_done(sk); | ||
1958 | } | ||
1959 | |||
1960 | chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue); | ||
1961 | } | ||
1962 | |||
1963 | static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb) | ||
1964 | { | ||
1965 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR; | ||
1966 | struct chtls_sock *csk; | ||
1967 | struct chtls_dev *cdev; | ||
1968 | |||
1969 | csk = rcu_dereference_sk_user_data(sk); | ||
1970 | cdev = csk->cdev; | ||
1971 | |||
1972 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) { | ||
1973 | csk_reset_flag(csk, CSK_ABORT_RPL_PENDING); | ||
1974 | if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) { | ||
1975 | if (sk->sk_state == TCP_SYN_SENT) { | ||
1976 | cxgb4_remove_tid(cdev->tids, | ||
1977 | csk->port_id, | ||
1978 | GET_TID(rpl), | ||
1979 | sk->sk_family); | ||
1980 | sock_put(sk); | ||
1981 | } | ||
1982 | chtls_release_resources(sk); | ||
1983 | chtls_conn_done(sk); | ||
1984 | } | ||
1985 | } | ||
1986 | kfree_skb(skb); | ||
1987 | } | ||
1988 | |||
1989 | static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb) | ||
1990 | { | ||
1991 | struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR; | ||
1992 | void (*fn)(struct sock *sk, struct sk_buff *skb); | ||
1993 | unsigned int hwtid = GET_TID(req); | ||
1994 | struct sock *sk; | ||
1995 | u8 opcode; | ||
1996 | |||
1997 | opcode = ((const struct rss_header *)cplhdr(skb))->opcode; | ||
1998 | |||
1999 | sk = lookup_tid(cdev->tids, hwtid); | ||
2000 | if (!sk) | ||
2001 | goto rel_skb; | ||
2002 | |||
2003 | switch (opcode) { | ||
2004 | case CPL_PEER_CLOSE: | ||
2005 | fn = chtls_peer_close; | ||
2006 | break; | ||
2007 | case CPL_CLOSE_CON_RPL: | ||
2008 | fn = chtls_close_con_rpl; | ||
2009 | break; | ||
2010 | case CPL_ABORT_REQ_RSS: | ||
2011 | fn = chtls_abort_req_rss; | ||
2012 | break; | ||
2013 | case CPL_ABORT_RPL_RSS: | ||
2014 | fn = chtls_abort_rpl_rss; | ||
2015 | break; | ||
2016 | default: | ||
2017 | goto rel_skb; | ||
2018 | } | ||
2019 | |||
2020 | process_cpl_msg(fn, sk, skb); | ||
2021 | return 0; | ||
2022 | |||
2023 | rel_skb: | ||
2024 | kfree_skb(skb); | ||
2025 | return 0; | ||
2026 | } | ||
2027 | |||
2028 | static struct sk_buff *dequeue_wr(struct sock *sk) | ||
2029 | { | ||
2030 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); | ||
2031 | struct sk_buff *skb = csk->wr_skb_head; | ||
2032 | |||
2033 | if (likely(skb)) { | ||
2034 | /* Don't bother clearing the tail */ | ||
2035 | csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; | ||
2036 | WR_SKB_CB(skb)->next_wr = NULL; | ||
2037 | } | ||
2038 | return skb; | ||
2039 | } | ||
2040 | |||
2041 | static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) | ||
2042 | { | ||
2043 | struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; | ||
2044 | struct chtls_sock *csk = sk->sk_user_data; | ||
2045 | struct tcp_sock *tp = tcp_sk(sk); | ||
2046 | u32 credits = hdr->credits; | ||
2047 | u32 snd_una; | ||
2048 | |||
2049 | snd_una = ntohl(hdr->snd_una); | ||
2050 | csk->wr_credits += credits; | ||
2051 | |||
2052 | if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits) | ||
2053 | csk->wr_unacked = csk->wr_max_credits - csk->wr_credits; | ||
2054 | |||
2055 | while (credits) { | ||
2056 | struct sk_buff *pskb = csk->wr_skb_head; | ||
2057 | u32 csum; | ||
2058 | |||
2059 | if (unlikely(!pskb)) { | ||
2060 | if (csk->wr_nondata) | ||
2061 | csk->wr_nondata -= credits; | ||
2062 | break; | ||
2063 | } | ||
2064 | csum = (__force u32)pskb->csum; | ||
2065 | if (unlikely(credits < csum)) { | ||
2066 | pskb->csum = (__force __wsum)(csum - credits); | ||
2067 | break; | ||
2068 | } | ||
2069 | dequeue_wr(sk); | ||
2070 | credits -= csum; | ||
2071 | kfree_skb(pskb); | ||
2072 | } | ||
2073 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { | ||
2074 | if (unlikely(before(snd_una, tp->snd_una))) { | ||
2075 | kfree_skb(skb); | ||
2076 | return; | ||
2077 | } | ||
2078 | |||
2079 | if (tp->snd_una != snd_una) { | ||
2080 | tp->snd_una = snd_una; | ||
2081 | tp->rcv_tstamp = tcp_time_stamp(tp); | ||
2082 | if (tp->snd_una == tp->snd_nxt && | ||
2083 | !csk_flag_nochk(csk, CSK_TX_FAILOVER)) | ||
2084 | csk_reset_flag(csk, CSK_TX_WAIT_IDLE); | ||
2085 | } | ||
2086 | } | ||
2087 | |||
2088 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) { | ||
2089 | unsigned int fclen16 = roundup(failover_flowc_wr_len, 16); | ||
2090 | |||
2091 | csk->wr_credits -= fclen16; | ||
2092 | csk_reset_flag(csk, CSK_TX_WAIT_IDLE); | ||
2093 | csk_reset_flag(csk, CSK_TX_FAILOVER); | ||
2094 | } | ||
2095 | if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0)) | ||
2096 | sk->sk_write_space(sk); | ||
2097 | |||
2098 | kfree_skb(skb); | ||
2099 | } | ||
2100 | |||
2101 | static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb) | ||
2102 | { | ||
2103 | struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR; | ||
2104 | unsigned int hwtid = GET_TID(rpl); | ||
2105 | struct sock *sk; | ||
2106 | |||
2107 | sk = lookup_tid(cdev->tids, hwtid); | ||
2108 | process_cpl_msg(chtls_rx_ack, sk, skb); | ||
2109 | |||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = { | ||
2114 | [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl, | ||
2115 | [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl, | ||
2116 | [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req, | ||
2117 | [CPL_PASS_ESTABLISH] = chtls_pass_establish, | ||
2118 | [CPL_RX_DATA] = chtls_rx_data, | ||
2119 | [CPL_TLS_DATA] = chtls_rx_pdu, | ||
2120 | [CPL_RX_TLS_CMP] = chtls_rx_cmp, | ||
2121 | [CPL_PEER_CLOSE] = chtls_conn_cpl, | ||
2122 | [CPL_CLOSE_CON_RPL] = chtls_conn_cpl, | ||
2123 | [CPL_ABORT_REQ_RSS] = chtls_conn_cpl, | ||
2124 | [CPL_ABORT_RPL_RSS] = chtls_conn_cpl, | ||
2125 | [CPL_FW4_ACK] = chtls_wr_ack, | ||
2126 | }; | ||
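The table above is the driver's CPL dispatch table: each entry maps a CPL opcode to one of the handlers defined in this file, all sharing the (struct chtls_dev *, struct sk_buff *) signature. As a rough illustration only (not part of this patch; registration with the cxgb4 ULD layer happens elsewhere in the driver, and the function name below is made up), a receive hook could dispatch on the opcode carried in the RSS header like this:

/*
 * Illustrative sketch, not part of the patch: it assumes the ULD
 * receive path already holds the CPL message in an skb whose data
 * begins at the RSS header, which is what the handlers above expect.
 */
static int chtls_dispatch_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	u8 opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	if (opcode < NUM_CPL_CMDS && chtls_handlers[opcode])
		return chtls_handlers[opcode](cdev, skb);

	pr_err_ratelimited("chtls: unexpected CPL opcode 0x%x\n", opcode);
	kfree_skb(skb);
	return 0;
}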
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e7e36433cdb5..57b5468b5139 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -332,6 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	tcp_update_metrics(sk);
 	tcp_done(sk);
 }
+EXPORT_SYMBOL(tcp_time_wait);
 
 void tcp_twsk_destructor(struct sock *sk)
 {
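The one-line change to net/ipv4/tcp_minisocks.c exports tcp_time_wait(): chtls_timewait() above calls tcp_time_wait(sk, TCP_TIME_WAIT, 0) to move an offloaded connection into TIME_WAIT, and because chtls can be built as a module the symbol needs EXPORT_SYMBOL so the module can resolve it at load time.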