Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c	2374
1 file changed, 2374 insertions, 0 deletions

diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"

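/*
 * Human-readable names for the c4iw_ep_state enum (defined in
 * iw_cxgb4.h), used by the PDBG state-transition traces below; the
 * order here must match the enum exactly.
 */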
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

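/*
 * All of the above are ordinary 0644 module parameters.  As a usage
 * sketch (hypothetical values, and assuming this driver is built as the
 * iw_cxgb4 module):
 *
 *	modprobe iw_cxgb4 peer2peer=1 rcv_win=131072 c4iw_debug=1
 *
 * They also remain writable after load under
 * /sys/module/iw_cxgb4/parameters/.
 */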
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

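/*
 * The endpoint timer holds a reference on the ep: start_ep_timer() takes
 * one (unless it is merely rearming a pending timer) and stop_ep_timer()
 * drops it, so an ep cannot be freed while its timer is outstanding.
 */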
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	unsigned long flags;
	enum c4iw_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

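/*
 * Send a FW_FLOWC_WR for this connection.  This must be the first work
 * request posted on a new offloaded TCP flow: it tells the firmware the
 * tx channel, port, ingress queue, initial sequence numbers, send buffer
 * size and MSS to use for all subsequent TX data on this tid.
 */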
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return;
	}
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(0);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

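/*
 * Kick off an active open.  opt0/opt2 below pack the TCP connection
 * options (keepalive, window scale, MSS index, L2T entry, tx channel,
 * DSCP, receive buffer size, RSS queue) into the hardware connection
 * context for this tid.
 */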
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

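/*
 * Send the MPA start request as streaming TX data.  The MPA header
 * (key, flags, revision, private data length) plus any ULP private data
 * ride immediately after the FW_OFLD_TX_DATA_WR, and the skb stays
 * referenced in ep->mpa_skb until the hardware acks the transmit.
 */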
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

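/*
 * Active open established: the TCP 3-way handshake has completed, so the
 * connection migrates from its atid to the hardware tid carried in the
 * CPL, and MPA negotiation starts (flowc first, then the MPA request).
 */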
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	send_mpa_req(ep, skb);

	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

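/*
 * Return RX credits to the hardware.  The T4 flow-control scheme only
 * reopens the advertised TCP receive window as the host hands back
 * credits for bytes it has consumed, so this is sent after each chunk of
 * streaming-mode data is processed.
 */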
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits);
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

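/*
 * Handle the MPA start reply on the active side.  A reply can arrive
 * split across several CPL_RX_DATA segments, so each piece is appended
 * to ep->mpa_pkt and the header and private data are only validated once
 * the accumulated length covers them.
 */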
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.  And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
				FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;
	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire MPA
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
				FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

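/*
 * CPL_RX_DATA carries streaming-mode (pre-RDMA) TCP payload.  It is only
 * expected while MPA negotiation is in flight; the payload is fed to the
 * reply/request accumulators above based on the endpoint state.
 */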
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	unsigned long flags;
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return 0;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return 0;
}

static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return 0;
}

static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}

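/*
 * Handle a passive-open connection request.  A route and L2T entry are
 * resolved for the peer (with a special case for loopback), a child ep
 * is allocated and wired up to the listening ep, and the accept reply is
 * sent; any failure path releases the hardware tid via reject_cr().
 */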
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	struct net_device *pdev;
	u32 tx_chan, smac_idx;
	u16 rss_qid;
	u32 mtu;
	int step;
	int txq_idx;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->u.dst;
	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, peer_ip);
		BUG_ON(!pdev);
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    pdev, 0);
		mtu = pdev->mtu;
		tx_chan = cxgb4_port_chan(pdev);
		smac_idx = tx_chan << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(pdev) * step;
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    dst->neighbour->dev, 0);
		mtu = dst_mtu(dst);
		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
		smac_idx = tx_chan << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[
			  cxgb4_port_idx(dst->neighbour->dev) * step];
	}
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		cxgb4_l2t_release(l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	child_ep->tx_chan = tx_chan;
	child_ep->smac_idx = smac_idx;
	child_ep->rss_qid = rss_qid;
	child_ep->mtu = mtu;
	child_ep->txq_idx = txq_idx;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     tx_chan, smac_idx, rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}

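/*
 * Peer sent a FIN.  What happens next depends on where the connection is
 * in its life cycle: during MPA negotiation we fail the connect/accept,
 * in FPDU_MODE we begin an orderly close, and in MORIBUND the close
 * completes and the ep is released.
 */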
1464 | static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | ||
1465 | { | ||
1466 | struct cpl_peer_close *hdr = cplhdr(skb); | ||
1467 | struct c4iw_ep *ep; | ||
1468 | struct c4iw_qp_attributes attrs; | ||
1469 | unsigned long flags; | ||
1470 | int disconnect = 1; | ||
1471 | int release = 0; | ||
1472 | int closing = 0; | ||
1473 | struct tid_info *t = dev->rdev.lldi.tids; | ||
1474 | unsigned int tid = GET_TID(hdr); | ||
1475 | int start_timer = 0; | ||
1476 | int stop_timer = 0; | ||
1477 | |||
1478 | ep = lookup_tid(t, tid); | ||
1479 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
1480 | dst_confirm(ep->dst); | ||
1481 | |||
1482 | spin_lock_irqsave(&ep->com.lock, flags); | ||
1483 | switch (ep->com.state) { | ||
1484 | case MPA_REQ_WAIT: | ||
1485 | __state_set(&ep->com, CLOSING); | ||
1486 | break; | ||
1487 | case MPA_REQ_SENT: | ||
1488 | __state_set(&ep->com, CLOSING); | ||
1489 | connect_reply_upcall(ep, -ECONNRESET); | ||
1490 | break; | ||
1491 | case MPA_REQ_RCVD: | ||
1492 | |||
1493 | /* | ||
1494 | * We're gonna mark this puppy DEAD, but keep | ||
1495 | * the reference on it until the ULP accepts or | ||
1496 | * rejects the CR. Also wake up anyone waiting | ||
1497 | * in rdma connection migration (see c4iw_accept_cr()). | ||
1498 | */ | ||
1499 | __state_set(&ep->com, CLOSING); | ||
1500 | ep->com.rpl_done = 1; | ||
1501 | ep->com.rpl_err = -ECONNRESET; | ||
1502 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | ||
1503 | wake_up(&ep->com.waitq); | ||
1504 | break; | ||
1505 | case MPA_REP_SENT: | ||
1506 | __state_set(&ep->com, CLOSING); | ||
1507 | ep->com.rpl_done = 1; | ||
1508 | ep->com.rpl_err = -ECONNRESET; | ||
1509 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | ||
1510 | wake_up(&ep->com.waitq); | ||
1511 | break; | ||
1512 | case FPDU_MODE: | ||
1513 | start_timer = 1; | ||
1514 | __state_set(&ep->com, CLOSING); | ||
1515 | closing = 1; | ||
1516 | peer_close_upcall(ep); | ||
1517 | break; | ||
1518 | case ABORTING: | ||
1519 | disconnect = 0; | ||
1520 | break; | ||
1521 | case CLOSING: | ||
1522 | __state_set(&ep->com, MORIBUND); | ||
1523 | disconnect = 0; | ||
1524 | break; | ||
1525 | case MORIBUND: | ||
1526 | stop_timer = 1; | ||
1527 | if (ep->com.cm_id && ep->com.qp) { | ||
1528 | attrs.next_state = C4IW_QP_STATE_IDLE; | ||
1529 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1530 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1531 | } | ||
1532 | close_complete_upcall(ep); | ||
1533 | __state_set(&ep->com, DEAD); | ||
1534 | release = 1; | ||
1535 | disconnect = 0; | ||
1536 | break; | ||
1537 | case DEAD: | ||
1538 | disconnect = 0; | ||
1539 | break; | ||
1540 | default: | ||
1541 | BUG(); | ||
1542 | } | ||
1543 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1544 | if (closing) { | ||
1545 | attrs.next_state = C4IW_QP_STATE_CLOSING; | ||
1546 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1547 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1548 | } | ||
1549 | if (start_timer) | ||
1550 | start_ep_timer(ep); | ||
1551 | if (stop_timer) | ||
1552 | stop_ep_timer(ep); | ||
1553 | if (disconnect) | ||
1554 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | ||
1555 | if (release) | ||
1556 | release_ep_resources(ep); | ||
1557 | return 0; | ||
1558 | } | ||
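
/*
 * Reader aid (a summary derived from the switch above): the transitions
 * peer_close() takes for an inbound FIN.
 *
 *    state          new state   side effects
 *    MPA_REQ_WAIT   CLOSING     -
 *    MPA_REQ_SENT   CLOSING     connect_reply_upcall(-ECONNRESET)
 *    MPA_REQ_RCVD   CLOSING     wake accept/reject waiter
 *    MPA_REP_SENT   CLOSING     wake accept/reject waiter
 *    FPDU_MODE      CLOSING     qp -> CLOSING, peer_close_upcall(),
 *                               start ep timer
 *    CLOSING        MORIBUND    -
 *    MORIBUND       DEAD        qp -> IDLE, close_complete_upcall(),
 *                               stop timer, release resources
 *    ABORTING/DEAD  unchanged   -
 *
 * Every case that leaves "disconnect" set (the MPA_* states and
 * FPDU_MODE) then calls c4iw_ep_disconnect() to send our own graceful
 * half-close, which advances CLOSING to MORIBUND.
 */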
1559 | |||
1560 | /* | ||
1561 | * Returns whether an ABORT_REQ_RSS message is a negative advice. | ||
1562 | */ | ||
1563 | static int is_neg_adv_abort(unsigned int status) | ||
1564 | { | ||
1565 | return status == CPL_ERR_RTX_NEG_ADVICE || | ||
1566 | status == CPL_ERR_PERSIST_NEG_ADVICE; | ||
1567 | } | ||
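
/*
 * Background note: negative advice is the hardware's hint that the peer
 * *may* be unreachable (retransmit or persist timers backing off), not
 * proof that the connection is dead.  peer_abort() below therefore
 * ignores such aborts instead of tearing the endpoint down.
 */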
1568 | |||
1569 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | ||
1570 | { | ||
1571 | struct cpl_abort_req_rss *req = cplhdr(skb); | ||
1572 | struct c4iw_ep *ep; | ||
1573 | struct cpl_abort_rpl *rpl; | ||
1574 | struct sk_buff *rpl_skb; | ||
1575 | struct c4iw_qp_attributes attrs; | ||
1576 | int ret; | ||
1577 | int release = 0; | ||
1578 | unsigned long flags; | ||
1579 | struct tid_info *t = dev->rdev.lldi.tids; | ||
1580 | unsigned int tid = GET_TID(req); | ||
1581 | int stop_timer = 0; | ||
1582 | |||
1583 | ep = lookup_tid(t, tid); | ||
1584 | if (is_neg_adv_abort(req->status)) { | ||
1585 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | ||
1586 | ep->hwtid); | ||
1587 | return 0; | ||
1588 | } | ||
1589 | spin_lock_irqsave(&ep->com.lock, flags); | ||
1590 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | ||
1591 | ep->com.state); | ||
1592 | switch (ep->com.state) { | ||
1593 | case CONNECTING: | ||
1594 | break; | ||
1595 | case MPA_REQ_WAIT: | ||
1596 | stop_timer = 1; | ||
1597 | break; | ||
1598 | case MPA_REQ_SENT: | ||
1599 | stop_timer = 1; | ||
1600 | connect_reply_upcall(ep, -ECONNRESET); | ||
1601 | break; | ||
1602 | case MPA_REP_SENT: | ||
1603 | ep->com.rpl_done = 1; | ||
1604 | ep->com.rpl_err = -ECONNRESET; | ||
1605 | PDBG("waking up ep %p\n", ep); | ||
1606 | wake_up(&ep->com.waitq); | ||
1607 | break; | ||
1608 | case MPA_REQ_RCVD: | ||
1609 | |||
1610 | /* | ||
1611 | * The endpoint will be marked DEAD below, but keep | ||
1612 | * the reference on it until the ULP accepts or | ||
1613 | * rejects the CR. Also wake up anyone waiting | ||
1614 | * in rdma connection migration (see c4iw_accept_cr()). | ||
1615 | */ | ||
1616 | ep->com.rpl_done = 1; | ||
1617 | ep->com.rpl_err = -ECONNRESET; | ||
1618 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | ||
1619 | wake_up(&ep->com.waitq); | ||
1620 | break; | ||
1621 | case MORIBUND: | ||
1622 | case CLOSING: | ||
1623 | stop_timer = 1; | ||
1624 | /* fall through */ | ||
1625 | case FPDU_MODE: | ||
1626 | if (ep->com.cm_id && ep->com.qp) { | ||
1627 | attrs.next_state = C4IW_QP_STATE_ERROR; | ||
1628 | ret = c4iw_modify_qp(ep->com.qp->rhp, | ||
1629 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | ||
1630 | &attrs, 1); | ||
1631 | if (ret) | ||
1632 | printk(KERN_ERR MOD | ||
1633 | "%s - qp <- error failed!\n", | ||
1634 | __func__); | ||
1635 | } | ||
1636 | peer_abort_upcall(ep); | ||
1637 | break; | ||
1638 | case ABORTING: | ||
1639 | break; | ||
1640 | case DEAD: | ||
1641 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); | ||
1642 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1643 | return 0; | ||
1644 | default: | ||
1645 | BUG(); | ||
1646 | break; | ||
1647 | } | ||
1648 | dst_confirm(ep->dst); | ||
1649 | if (ep->com.state != ABORTING) { | ||
1650 | __state_set(&ep->com, DEAD); | ||
1651 | release = 1; | ||
1652 | } | ||
1653 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1654 | |||
1655 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); | ||
1656 | if (!rpl_skb) { | ||
1657 | printk(KERN_ERR MOD "%s - cannot allocate skb!\n", | ||
1658 | __func__); | ||
1659 | release = 1; | ||
1660 | goto out; | ||
1661 | } | ||
1662 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | ||
1663 | rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); | ||
1664 | INIT_TP_WR(rpl, ep->hwtid); | ||
1665 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); | ||
1666 | rpl->cmd = CPL_ABORT_NO_RST; | ||
1667 | c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); | ||
1668 | out: | ||
1669 | if (stop_timer) | ||
1670 | stop_ep_timer(ep); | ||
1671 | if (release) | ||
1672 | release_ep_resources(ep); | ||
1673 | return 0; | ||
1674 | } | ||
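
/*
 * Note on the reply built above: a CPL_ABORT_RPL with cmd set to
 * CPL_ABORT_NO_RST acknowledges the peer's abort without asking the
 * hardware to emit a TCP RST of its own; the abort originated with the
 * peer, so none is needed.
 */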
1675 | |||
1676 | static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | ||
1677 | { | ||
1678 | struct c4iw_ep *ep; | ||
1679 | struct c4iw_qp_attributes attrs; | ||
1680 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | ||
1681 | unsigned long flags; | ||
1682 | int release = 0; | ||
1683 | struct tid_info *t = dev->rdev.lldi.tids; | ||
1684 | unsigned int tid = GET_TID(rpl); | ||
1685 | int stop_timer = 0; | ||
1686 | |||
1687 | ep = lookup_tid(t, tid); | ||
1688 | |||
1689 | BUG_ON(!ep); | ||
1690 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
1691 | |||
1692 | /* The cm_id may be null if we failed to connect */ | ||
1693 | spin_lock_irqsave(&ep->com.lock, flags); | ||
1694 | switch (ep->com.state) { | ||
1695 | case CLOSING: | ||
1696 | __state_set(&ep->com, MORIBUND); | ||
1697 | break; | ||
1698 | case MORIBUND: | ||
1699 | stop_timer = 1; | ||
1700 | if ((ep->com.cm_id) && (ep->com.qp)) { | ||
1701 | attrs.next_state = C4IW_QP_STATE_IDLE; | ||
1702 | c4iw_modify_qp(ep->com.qp->rhp, | ||
1703 | ep->com.qp, | ||
1704 | C4IW_QP_ATTR_NEXT_STATE, | ||
1705 | &attrs, 1); | ||
1706 | } | ||
1707 | close_complete_upcall(ep); | ||
1708 | __state_set(&ep->com, DEAD); | ||
1709 | release = 1; | ||
1710 | break; | ||
1711 | case ABORTING: | ||
1712 | case DEAD: | ||
1713 | break; | ||
1714 | default: | ||
1715 | BUG(); | ||
1716 | break; | ||
1717 | } | ||
1718 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1719 | if (stop_timer) | ||
1720 | stop_ep_timer(ep); | ||
1721 | if (release) | ||
1722 | release_ep_resources(ep); | ||
1723 | return 0; | ||
1724 | } | ||
1725 | |||
1726 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | ||
1727 | { | ||
1728 | struct c4iw_ep *ep; | ||
1729 | struct cpl_rdma_terminate *term = cplhdr(skb); | ||
1730 | struct tid_info *t = dev->rdev.lldi.tids; | ||
1731 | unsigned int tid = GET_TID(term); | ||
1732 | |||
1733 | ep = lookup_tid(t, tid); | ||
1734 | |||
1735 | if (state_read(&ep->com) != FPDU_MODE) | ||
1736 | return 0; | ||
1737 | |||
1738 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
1739 | skb_pull(skb, sizeof(*term)); | ||
1740 | PDBG("%s saving %d bytes of term msg\n", __func__, skb->len); | ||
1741 | skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, | ||
1742 | skb->len); | ||
1743 | ep->com.qp->attr.terminate_msg_len = skb->len; | ||
1744 | ep->com.qp->attr.is_terminate_local = 0; | ||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1748 | /* | ||
1749 | * Upcall from the adapter indicating data has been transmitted. | ||
1750 | * For us it is just the single MPA request or reply. We can now free | ||
1751 | * the skb holding the mpa message. | ||
1752 | */ | ||
1753 | static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | ||
1754 | { | ||
1755 | struct c4iw_ep *ep; | ||
1756 | struct cpl_fw4_ack *hdr = cplhdr(skb); | ||
1757 | u8 credits = hdr->credits; | ||
1758 | unsigned int tid = GET_TID(hdr); | ||
1759 | struct tid_info *t = dev->rdev.lldi.tids; | ||
1760 | |||
1761 | |||
1762 | ep = lookup_tid(t, tid); | ||
1763 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | ||
1764 | if (credits == 0) { | ||
1765 | PDBG("%s 0 credit ack ep %p tid %u state %u\n", | ||
1766 | __func__, ep, ep->hwtid, state_read(&ep->com)); | ||
1767 | return 0; | ||
1768 | } | ||
1769 | |||
1770 | dst_confirm(ep->dst); | ||
1771 | if (ep->mpa_skb) { | ||
1772 | PDBG("%s last streaming msg ack ep %p tid %u state %u " | ||
1773 | "initiator %u freeing skb\n", __func__, ep, ep->hwtid, | ||
1774 | state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); | ||
1775 | kfree_skb(ep->mpa_skb); | ||
1776 | ep->mpa_skb = NULL; | ||
1777 | } | ||
1778 | return 0; | ||
1779 | } | ||
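
/*
 * Life cycle of the skb freed above (summary): the MPA send helpers
 * earlier in this file stash the in-flight streaming message in
 * ep->mpa_skb before transmitting it.  Once the payload has gone out,
 * the adapter returns a CPL_FW4_ACK carrying a non-zero credit count,
 * and only then is it safe for fw4_ack() to free the skb.
 */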
1780 | |||
1781 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | ||
1782 | { | ||
1783 | int err; | ||
1784 | struct c4iw_ep *ep = to_ep(cm_id); | ||
1785 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
1786 | |||
1787 | if (state_read(&ep->com) == DEAD) { | ||
1788 | c4iw_put_ep(&ep->com); | ||
1789 | return -ECONNRESET; | ||
1790 | } | ||
1791 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | ||
1792 | if (mpa_rev == 0) | ||
1793 | abort_connection(ep, NULL, GFP_KERNEL); | ||
1794 | else { | ||
1795 | err = send_mpa_reject(ep, pdata, pdata_len); | ||
1796 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | ||
1797 | } | ||
1798 | c4iw_put_ep(&ep->com); | ||
1799 | return 0; | ||
1800 | } | ||
1801 | |||
1802 | int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | ||
1803 | { | ||
1804 | int err; | ||
1805 | struct c4iw_qp_attributes attrs; | ||
1806 | enum c4iw_qp_attr_mask mask; | ||
1807 | struct c4iw_ep *ep = to_ep(cm_id); | ||
1808 | struct c4iw_dev *h = to_c4iw_dev(cm_id->device); | ||
1809 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); | ||
1810 | |||
1811 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
1812 | if (state_read(&ep->com) == DEAD) { | ||
1813 | err = -ECONNRESET; | ||
1814 | goto err; | ||
1815 | } | ||
1816 | |||
1817 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | ||
1818 | BUG_ON(!qp); | ||
1819 | |||
1820 | if ((conn_param->ord > c4iw_max_read_depth) || | ||
1821 | (conn_param->ird > c4iw_max_read_depth)) { | ||
1822 | abort_connection(ep, NULL, GFP_KERNEL); | ||
1823 | err = -EINVAL; | ||
1824 | goto err; | ||
1825 | } | ||
1826 | |||
1827 | cm_id->add_ref(cm_id); | ||
1828 | ep->com.cm_id = cm_id; | ||
1829 | ep->com.qp = qp; | ||
1830 | |||
1831 | ep->ird = conn_param->ird; | ||
1832 | ep->ord = conn_param->ord; | ||
1833 | |||
1834 | if (peer2peer && ep->ird == 0) | ||
1835 | ep->ird = 1; | ||
1836 | |||
1837 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | ||
1838 | |||
1839 | /* bind QP to EP and move to RTS */ | ||
1840 | attrs.mpa_attr = ep->mpa_attr; | ||
1841 | attrs.max_ird = ep->ird; | ||
1842 | attrs.max_ord = ep->ord; | ||
1843 | attrs.llp_stream_handle = ep; | ||
1844 | attrs.next_state = C4IW_QP_STATE_RTS; | ||
1845 | |||
1846 | /* bind QP and TID with INIT_WR */ | ||
1847 | mask = C4IW_QP_ATTR_NEXT_STATE | | ||
1848 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | | ||
1849 | C4IW_QP_ATTR_MPA_ATTR | | ||
1850 | C4IW_QP_ATTR_MAX_IRD | | ||
1851 | C4IW_QP_ATTR_MAX_ORD; | ||
1852 | |||
1853 | err = c4iw_modify_qp(ep->com.qp->rhp, | ||
1854 | ep->com.qp, mask, &attrs, 1); | ||
1855 | if (err) | ||
1856 | goto err1; | ||
1857 | err = send_mpa_reply(ep, conn_param->private_data, | ||
1858 | conn_param->private_data_len); | ||
1859 | if (err) | ||
1860 | goto err1; | ||
1861 | |||
1862 | state_set(&ep->com, FPDU_MODE); | ||
1863 | established_upcall(ep); | ||
1864 | c4iw_put_ep(&ep->com); | ||
1865 | return 0; | ||
1866 | err1: | ||
1867 | ep->com.cm_id = NULL; | ||
1868 | ep->com.qp = NULL; | ||
1869 | cm_id->rem_ref(cm_id); | ||
1870 | err: | ||
1871 | c4iw_put_ep(&ep->com); | ||
1872 | return err; | ||
1873 | } | ||
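
/*
 * Illustrative ULP-side usage (a sketch, not driver code): the iw_cm
 * core (include/rdma/iw_cm.h) routes iw_cm_accept() down to
 * c4iw_accept_cr() above.  pdata, pdata_len and my_qp are placeholder
 * names:
 *
 *    struct iw_cm_conn_param param = {
 *            .private_data     = pdata,
 *            .private_data_len = pdata_len,
 *            .ord              = 1,    (must be <= c4iw_max_read_depth)
 *            .ird              = 1,
 *            .qpn              = my_qp->qp_num,
 *    };
 *    err = iw_cm_accept(cm_id, &param);
 */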
1874 | |||
1875 | int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | ||
1876 | { | ||
1877 | int err = 0; | ||
1878 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | ||
1879 | struct c4iw_ep *ep; | ||
1880 | struct rtable *rt; | ||
1881 | struct net_device *pdev; | ||
1882 | int step; | ||
1883 | |||
1884 | if ((conn_param->ord > c4iw_max_read_depth) || | ||
1885 | (conn_param->ird > c4iw_max_read_depth)) { | ||
1886 | err = -EINVAL; | ||
1887 | goto out; | ||
1888 | } | ||
1889 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | ||
1890 | if (!ep) { | ||
1891 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | ||
1892 | err = -ENOMEM; | ||
1893 | goto out; | ||
1894 | } | ||
1895 | init_timer(&ep->timer); | ||
1896 | ep->plen = conn_param->private_data_len; | ||
1897 | if (ep->plen) | ||
1898 | memcpy(ep->mpa_pkt + sizeof(struct mpa_message), | ||
1899 | conn_param->private_data, ep->plen); | ||
1900 | ep->ird = conn_param->ird; | ||
1901 | ep->ord = conn_param->ord; | ||
1902 | |||
1903 | if (peer2peer && ep->ord == 0) | ||
1904 | ep->ord = 1; | ||
1905 | |||
1906 | cm_id->add_ref(cm_id); | ||
1907 | ep->com.dev = dev; | ||
1908 | ep->com.cm_id = cm_id; | ||
1909 | ep->com.qp = get_qhp(dev, conn_param->qpn); | ||
1910 | BUG_ON(!ep->com.qp); | ||
1911 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, | ||
1912 | ep->com.qp, cm_id); | ||
1913 | |||
1914 | /* | ||
1915 | * Allocate an active TID to initiate a TCP connection. | ||
1916 | */ | ||
1917 | ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); | ||
1918 | if (ep->atid == -1) { | ||
1919 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | ||
1920 | err = -ENOMEM; | ||
1921 | goto fail2; | ||
1922 | } | ||
1923 | |||
1924 | PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, | ||
1925 | ntohl(cm_id->local_addr.sin_addr.s_addr), | ||
1926 | ntohs(cm_id->local_addr.sin_port), | ||
1927 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | ||
1928 | ntohs(cm_id->remote_addr.sin_port)); | ||
1929 | |||
1930 | /* find a route */ | ||
1931 | rt = find_route(dev, | ||
1932 | cm_id->local_addr.sin_addr.s_addr, | ||
1933 | cm_id->remote_addr.sin_addr.s_addr, | ||
1934 | cm_id->local_addr.sin_port, | ||
1935 | cm_id->remote_addr.sin_port, 0); | ||
1936 | if (!rt) { | ||
1937 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | ||
1938 | err = -EHOSTUNREACH; | ||
1939 | goto fail3; | ||
1940 | } | ||
1941 | ep->dst = &rt->u.dst; | ||
1942 | |||
1943 | /* get a l2t entry */ | ||
1944 | if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { | ||
1945 | PDBG("%s LOOPBACK\n", __func__); | ||
1946 | pdev = ip_dev_find(&init_net, | ||
1947 | cm_id->remote_addr.sin_addr.s_addr); | ||
1948 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1949 | ep->dst->neighbour, | ||
1950 | pdev, 0); | ||
1951 | ep->mtu = pdev->mtu; | ||
1952 | ep->tx_chan = cxgb4_port_chan(pdev); | ||
1953 | ep->smac_idx = ep->tx_chan << 1; | ||
1954 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1955 | ep->com.dev->rdev.lldi.nchan; | ||
1956 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | ||
1957 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1958 | ep->com.dev->rdev.lldi.nchan; | ||
1959 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1960 | cxgb4_port_idx(pdev) * step]; | ||
1961 | dev_put(pdev); | ||
1962 | } else { | ||
1963 | ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, | ||
1964 | ep->dst->neighbour, | ||
1965 | ep->dst->neighbour->dev, 0); | ||
1966 | ep->mtu = dst_mtu(ep->dst); | ||
1967 | ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); | ||
1968 | ep->smac_idx = ep->tx_chan << 1; | ||
1969 | step = ep->com.dev->rdev.lldi.ntxq / | ||
1970 | ep->com.dev->rdev.lldi.nchan; | ||
1971 | ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; | ||
1972 | step = ep->com.dev->rdev.lldi.nrxq / | ||
1973 | ep->com.dev->rdev.lldi.nchan; | ||
1974 | ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ | ||
1975 | cxgb4_port_idx(ep->dst->neighbour->dev) * step]; | ||
1976 | } | ||
1977 | if (!ep->l2t) { | ||
1978 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); | ||
1979 | err = -ENOMEM; | ||
1980 | goto fail4; | ||
1981 | } | ||
1982 | |||
1983 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | ||
1984 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | ||
1985 | ep->l2t->idx); | ||
1986 | |||
1987 | state_set(&ep->com, CONNECTING); | ||
1988 | ep->tos = 0; | ||
1989 | ep->com.local_addr = cm_id->local_addr; | ||
1990 | ep->com.remote_addr = cm_id->remote_addr; | ||
1991 | |||
1992 | /* send connect request to rnic */ | ||
1993 | err = send_connect(ep); | ||
1994 | if (!err) | ||
1995 | goto out; | ||
1996 | |||
1997 | cxgb4_l2t_release(ep->l2t); | ||
1998 | fail4: | ||
1999 | dst_release(ep->dst); | ||
2000 | fail3: | ||
2001 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | ||
2002 | fail2: | ||
2003 | cm_id->rem_ref(cm_id); | ||
2004 | c4iw_put_ep(&ep->com); | ||
2005 | out: | ||
2006 | return err; | ||
2007 | } | ||
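
/*
 * Worked example of the queue-selection arithmetic above (the numbers
 * are hypothetical; real values come from the lower-level driver info
 * in ep->com.dev->rdev.lldi):
 *
 *    ntxq = 16, nrxq = 16, nchan = 4  =>  step = 16 / 4 = 4
 *    cxgb4_port_idx(netdev) = 2       =>  txq_idx = 2 * 4 = 8
 *                                         rss_qid = rxq_ids[2 * 4]
 *
 * Each port thus owns a contiguous block of "step" queues, and a new
 * connection uses the first queue of its port's block.
 */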
2008 | |||
2009 | int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | ||
2010 | { | ||
2011 | int err = 0; | ||
2012 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | ||
2013 | struct c4iw_listen_ep *ep; | ||
2014 | |||
2015 | |||
2016 | might_sleep(); | ||
2017 | |||
2018 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | ||
2019 | if (!ep) { | ||
2020 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | ||
2021 | err = -ENOMEM; | ||
2022 | goto fail1; | ||
2023 | } | ||
2024 | PDBG("%s ep %p\n", __func__, ep); | ||
2025 | cm_id->add_ref(cm_id); | ||
2026 | ep->com.cm_id = cm_id; | ||
2027 | ep->com.dev = dev; | ||
2028 | ep->backlog = backlog; | ||
2029 | ep->com.local_addr = cm_id->local_addr; | ||
2030 | |||
2031 | /* | ||
2032 | * Allocate a server TID. | ||
2033 | */ | ||
2034 | ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); | ||
2035 | if (ep->stid == -1) { | ||
2036 | printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); | ||
2037 | err = -ENOMEM; | ||
2038 | goto fail2; | ||
2039 | } | ||
2040 | |||
2041 | state_set(&ep->com, LISTEN); | ||
2042 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, | ||
2043 | ep->com.local_addr.sin_addr.s_addr, | ||
2044 | ep->com.local_addr.sin_port, | ||
2045 | ep->com.dev->rdev.lldi.rxq_ids[0]); | ||
2046 | if (err) | ||
2047 | goto fail3; | ||
2048 | |||
2049 | /* wait for pass_open_rpl */ | ||
2050 | wait_event(ep->com.waitq, ep->com.rpl_done); | ||
2051 | err = ep->com.rpl_err; | ||
2052 | if (!err) { | ||
2053 | cm_id->provider_data = ep; | ||
2054 | goto out; | ||
2055 | } | ||
2056 | fail3: | ||
2057 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); | ||
2058 | fail2: | ||
2059 | cm_id->rem_ref(cm_id); | ||
2060 | c4iw_put_ep(&ep->com); | ||
2061 | fail1: | ||
2062 | out: | ||
2063 | return err; | ||
2064 | } | ||
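
/*
 * Passive-open setup sequence implemented above (summary):
 *
 *    c4iw_create_listen()
 *      -> cxgb4_alloc_stid()            reserve a server TID
 *      -> cxgb4_create_server()         program the listener into hardware
 *      <- CPL_PASS_OPEN_RPL             pass_open_rpl() records rpl_err
 *                                       and sets rpl_done
 *      -> wait_event(waitq, rpl_done)   completes the verb synchronously
 */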
2065 | |||
2066 | int c4iw_destroy_listen(struct iw_cm_id *cm_id) | ||
2067 | { | ||
2068 | int err; | ||
2069 | struct c4iw_listen_ep *ep = to_listen_ep(cm_id); | ||
2070 | |||
2071 | PDBG("%s ep %p\n", __func__, ep); | ||
2072 | |||
2073 | might_sleep(); | ||
2074 | state_set(&ep->com, DEAD); | ||
2075 | ep->com.rpl_done = 0; | ||
2076 | ep->com.rpl_err = 0; | ||
2077 | err = listen_stop(ep); | ||
2078 | if (err) | ||
2079 | goto done; | ||
2080 | wait_event(ep->com.waitq, ep->com.rpl_done); | ||
2081 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); | ||
2082 | done: | ||
2083 | err = ep->com.rpl_err; | ||
2084 | cm_id->rem_ref(cm_id); | ||
2085 | c4iw_put_ep(&ep->com); | ||
2086 | return err; | ||
2087 | } | ||
2088 | |||
2089 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | ||
2090 | { | ||
2091 | int ret = 0; | ||
2092 | unsigned long flags; | ||
2093 | int close = 0; | ||
2094 | int fatal = 0; | ||
2095 | struct c4iw_rdev *rdev; | ||
2096 | int start_timer = 0; | ||
2097 | int stop_timer = 0; | ||
2098 | |||
2099 | spin_lock_irqsave(&ep->com.lock, flags); | ||
2100 | |||
2101 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | ||
2102 | states[ep->com.state], abrupt); | ||
2103 | |||
2104 | rdev = &ep->com.dev->rdev; | ||
2105 | if (c4iw_fatal_error(rdev)) { | ||
2106 | fatal = 1; | ||
2107 | close_complete_upcall(ep); | ||
2108 | ep->com.state = DEAD; | ||
2109 | } | ||
2110 | switch (ep->com.state) { | ||
2111 | case MPA_REQ_WAIT: | ||
2112 | case MPA_REQ_SENT: | ||
2113 | case MPA_REQ_RCVD: | ||
2114 | case MPA_REP_SENT: | ||
2115 | case FPDU_MODE: | ||
2116 | close = 1; | ||
2117 | if (abrupt) | ||
2118 | ep->com.state = ABORTING; | ||
2119 | else { | ||
2120 | ep->com.state = CLOSING; | ||
2121 | start_timer = 1; | ||
2122 | } | ||
2123 | set_bit(CLOSE_SENT, &ep->com.flags); | ||
2124 | break; | ||
2125 | case CLOSING: | ||
2126 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { | ||
2127 | close = 1; | ||
2128 | if (abrupt) { | ||
2129 | stop_timer = 1; | ||
2130 | ep->com.state = ABORTING; | ||
2131 | } else | ||
2132 | ep->com.state = MORIBUND; | ||
2133 | } | ||
2134 | break; | ||
2135 | case MORIBUND: | ||
2136 | case ABORTING: | ||
2137 | case DEAD: | ||
2138 | PDBG("%s ignoring disconnect ep %p state %u\n", | ||
2139 | __func__, ep, ep->com.state); | ||
2140 | break; | ||
2141 | default: | ||
2142 | BUG(); | ||
2143 | break; | ||
2144 | } | ||
2145 | |||
2146 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
2147 | if (start_timer) | ||
2148 | start_ep_timer(ep); | ||
2149 | if (stop_timer) | ||
2150 | stop_ep_timer(ep); | ||
2151 | if (close) { | ||
2152 | if (abrupt) | ||
2153 | ret = abort_connection(ep, NULL, gfp); | ||
2154 | else | ||
2155 | ret = send_halfclose(ep, gfp); | ||
2156 | if (ret) | ||
2157 | fatal = 1; | ||
2158 | } | ||
2159 | if (fatal) | ||
2160 | release_ep_resources(ep); | ||
2161 | return ret; | ||
2162 | } | ||
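
/*
 * The two disconnect flavours selected above (summary):
 *
 *    abrupt != 0:  the endpoint moves to ABORTING and
 *                  abort_connection() issues a TCP RST.
 *    abrupt == 0:  the endpoint moves to CLOSING (then MORIBUND) and
 *                  send_halfclose() issues a graceful half-close; the
 *                  ep timer is started so an unresponsive peer is
 *                  eventually aborted via process_timeout().
 */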
2163 | |||
2164 | /* | ||
2165 | * These are the real handlers that are called from a | ||
2166 | * work queue. | ||
2167 | */ | ||
2168 | static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { | ||
2169 | [CPL_ACT_ESTABLISH] = act_establish, | ||
2170 | [CPL_ACT_OPEN_RPL] = act_open_rpl, | ||
2171 | [CPL_RX_DATA] = rx_data, | ||
2172 | [CPL_ABORT_RPL_RSS] = abort_rpl, | ||
2173 | [CPL_ABORT_RPL] = abort_rpl, | ||
2174 | [CPL_PASS_OPEN_RPL] = pass_open_rpl, | ||
2175 | [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, | ||
2176 | [CPL_PASS_ACCEPT_REQ] = pass_accept_req, | ||
2177 | [CPL_PASS_ESTABLISH] = pass_establish, | ||
2178 | [CPL_PEER_CLOSE] = peer_close, | ||
2179 | [CPL_ABORT_REQ_RSS] = peer_abort, | ||
2180 | [CPL_CLOSE_CON_RPL] = close_con_rpl, | ||
2181 | [CPL_RDMA_TERMINATE] = terminate, | ||
2182 | [CPL_FW4_ACK] = fw4_ack | ||
2183 | }; | ||
2184 | |||
2185 | static void process_timeout(struct c4iw_ep *ep) | ||
2186 | { | ||
2187 | struct c4iw_qp_attributes attrs; | ||
2188 | int abort = 1; | ||
2189 | |||
2190 | spin_lock_irq(&ep->com.lock); | ||
2191 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, | ||
2192 | ep->com.state); | ||
2193 | switch (ep->com.state) { | ||
2194 | case MPA_REQ_SENT: | ||
2195 | __state_set(&ep->com, ABORTING); | ||
2196 | connect_reply_upcall(ep, -ETIMEDOUT); | ||
2197 | break; | ||
2198 | case MPA_REQ_WAIT: | ||
2199 | __state_set(&ep->com, ABORTING); | ||
2200 | break; | ||
2201 | case CLOSING: | ||
2202 | case MORIBUND: | ||
2203 | if (ep->com.cm_id && ep->com.qp) { | ||
2204 | attrs.next_state = C4IW_QP_STATE_ERROR; | ||
2205 | c4iw_modify_qp(ep->com.qp->rhp, | ||
2206 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | ||
2207 | &attrs, 1); | ||
2208 | } | ||
2209 | __state_set(&ep->com, ABORTING); | ||
2210 | break; | ||
2211 | default: | ||
2212 | printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n", | ||
2213 | __func__, ep, ep->hwtid, ep->com.state); | ||
2214 | WARN_ON(1); | ||
2215 | abort = 0; | ||
2216 | } | ||
2217 | spin_unlock_irq(&ep->com.lock); | ||
2218 | if (abort) | ||
2219 | abort_connection(ep, NULL, GFP_KERNEL); | ||
2220 | c4iw_put_ep(&ep->com); | ||
2221 | } | ||
2222 | |||
2223 | static void process_timedout_eps(void) | ||
2224 | { | ||
2225 | struct c4iw_ep *ep; | ||
2226 | |||
2227 | spin_lock_irq(&timeout_lock); | ||
2228 | while (!list_empty(&timeout_list)) { | ||
2229 | struct list_head *tmp; | ||
2230 | |||
2231 | tmp = timeout_list.next; | ||
2232 | list_del(tmp); | ||
2233 | spin_unlock_irq(&timeout_lock); | ||
2234 | ep = list_entry(tmp, struct c4iw_ep, entry); | ||
2235 | process_timeout(ep); | ||
2236 | spin_lock_irq(&timeout_lock); | ||
2237 | } | ||
2238 | spin_unlock_irq(&timeout_lock); | ||
2239 | } | ||
2240 | |||
2241 | static void process_work(struct work_struct *work) | ||
2242 | { | ||
2243 | struct sk_buff *skb = NULL; | ||
2244 | struct c4iw_dev *dev; | ||
2245 | struct cpl_act_establish *rpl; | ||
2246 | unsigned int opcode; | ||
2247 | int ret; | ||
2248 | |||
2249 | while ((skb = skb_dequeue(&rxq))) { | ||
2250 | rpl = cplhdr(skb); | ||
2251 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); | ||
2252 | opcode = rpl->ot.opcode; | ||
2253 | |||
2254 | BUG_ON(!work_handlers[opcode]); | ||
2255 | ret = work_handlers[opcode](dev, skb); | ||
2256 | if (!ret) | ||
2257 | kfree_skb(skb); | ||
2258 | } | ||
2259 | process_timedout_eps(); | ||
2260 | } | ||
2261 | |||
2262 | static DECLARE_WORK(skb_work, process_work); | ||
2263 | |||
2264 | static void ep_timeout(unsigned long arg) | ||
2265 | { | ||
2266 | struct c4iw_ep *ep = (struct c4iw_ep *)arg; | ||
2267 | |||
2268 | spin_lock(&timeout_lock); | ||
2269 | list_add_tail(&ep->entry, &timeout_list); | ||
2270 | spin_unlock(&timeout_lock); | ||
2271 | queue_work(workq, &skb_work); | ||
2272 | } | ||
2273 | |||
2274 | /* | ||
2275 | * All the CM events are handled on a work queue so that the handlers run in a context that can sleep. | ||
2276 | */ | ||
2277 | static int sched(struct c4iw_dev *dev, struct sk_buff *skb) | ||
2278 | { | ||
2279 | |||
2280 | /* | ||
2281 | * Save dev in the skb->cb area. | ||
2282 | */ | ||
2283 | *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; | ||
2284 | |||
2285 | /* | ||
2286 | * Queue the skb and schedule the worker thread. | ||
2287 | */ | ||
2288 | skb_queue_tail(&rxq, skb); | ||
2289 | queue_work(workq, &skb_work); | ||
2290 | return 0; | ||
2291 | } | ||
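
/*
 * The deferral pattern used by sched()/process_work(), side by side:
 *
 *    caller context (may be atomic)      process context (workq)
 *    sched():                            process_work():
 *      save dev in skb->cb                 skb = skb_dequeue(&rxq)
 *      skb_queue_tail(&rxq, skb)           dev = from skb->cb
 *      queue_work(workq, &skb_work)        work_handlers[opcode](dev, skb)
 *
 * skb_queue_tail()/skb_dequeue() take the queue's internal lock with
 * interrupts disabled, so no additional locking is needed here.
 */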
2292 | |||
2293 | static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | ||
2294 | { | ||
2295 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb); | ||
2296 | |||
2297 | if (rpl->status != CPL_ERR_NONE) { | ||
2298 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | ||
2299 | "for tid %u\n", rpl->status, GET_TID(rpl)); | ||
2300 | } | ||
2301 | return 0; | ||
2302 | } | ||
2303 | |||
2304 | static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) | ||
2305 | { | ||
2306 | struct cpl_fw6_msg *rpl = cplhdr(skb); | ||
2307 | struct c4iw_wr_wait *wr_waitp; | ||
2308 | int ret; | ||
2309 | |||
2310 | PDBG("%s type %u\n", __func__, rpl->type); | ||
2311 | |||
2312 | switch (rpl->type) { | ||
2313 | case 1: | ||
2314 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); | ||
2315 | wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; | ||
2316 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); | ||
2317 | if (wr_waitp) { | ||
2318 | wr_waitp->ret = ret; | ||
2319 | wr_waitp->done = 1; | ||
2320 | wake_up(&wr_waitp->wait); | ||
2321 | } | ||
2322 | break; | ||
2323 | case 2: | ||
2324 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | ||
2325 | break; | ||
2326 | default: | ||
2327 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, | ||
2328 | rpl->type); | ||
2329 | break; | ||
2330 | } | ||
2331 | return 0; | ||
2332 | } | ||
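
/*
 * Sketch of the type-1 FW6 rendezvous handled above.  The issuing side
 * lives elsewhere in the driver; its wait call is shown illustratively,
 * while the wake side matches the code above:
 *
 *    issuer:                             fw6_msg(), type 1:
 *      wr_wait->done = 0;                  wr_waitp->ret  = status;
 *      post WR carrying wr_wait cookie     wr_waitp->done = 1;
 *      wait_event(wr_wait->wait,           wake_up(&wr_waitp->wait);
 *                 wr_wait->done);
 */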
2333 | |||
2334 | /* | ||
2335 | * Most upcalls from the T4 Core go to sched() to | ||
2336 | * schedule the processing on a work queue. | ||
2337 | */ | ||
2338 | c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { | ||
2339 | [CPL_ACT_ESTABLISH] = sched, | ||
2340 | [CPL_ACT_OPEN_RPL] = sched, | ||
2341 | [CPL_RX_DATA] = sched, | ||
2342 | [CPL_ABORT_RPL_RSS] = sched, | ||
2343 | [CPL_ABORT_RPL] = sched, | ||
2344 | [CPL_PASS_OPEN_RPL] = sched, | ||
2345 | [CPL_CLOSE_LISTSRV_RPL] = sched, | ||
2346 | [CPL_PASS_ACCEPT_REQ] = sched, | ||
2347 | [CPL_PASS_ESTABLISH] = sched, | ||
2348 | [CPL_PEER_CLOSE] = sched, | ||
2349 | [CPL_CLOSE_CON_RPL] = sched, | ||
2350 | [CPL_ABORT_REQ_RSS] = sched, | ||
2351 | [CPL_RDMA_TERMINATE] = sched, | ||
2352 | [CPL_FW4_ACK] = sched, | ||
2353 | [CPL_SET_TCB_RPL] = set_tcb_rpl, | ||
2354 | [CPL_FW6_MSG] = fw6_msg | ||
2355 | }; | ||
2356 | |||
2357 | int __init c4iw_cm_init(void) | ||
2358 | { | ||
2359 | spin_lock_init(&timeout_lock); | ||
2360 | skb_queue_head_init(&rxq); | ||
2361 | |||
2362 | workq = create_singlethread_workqueue("iw_cxgb4"); | ||
2363 | if (!workq) | ||
2364 | return -ENOMEM; | ||
2365 | |||
2366 | return 0; | ||
2367 | } | ||
2368 | |||
2369 | void __exit c4iw_cm_term(void) | ||
2370 | { | ||
2371 | WARN_ON(!list_empty(&timeout_list)); | ||
2372 | flush_workqueue(workq); | ||
2373 | destroy_workqueue(workq); | ||
2374 | } | ||