Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_cm.c')
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.c	2081
1 file changed, 2081 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
new file mode 100644
index 000000000000..a522b1baa3b4
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -0,0 +1,2081 @@
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
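
/*
 * Deferred-work model, in brief: CM CPL messages arrive in interrupt
 * context and are queued on rxq with the endpoint and t3cdev pointers
 * stashed in skb->cb (see sched(), referenced from process_work()
 * below); skb_work then runs process_work() on workq, which dispatches
 * each message through work_handlers[] indexed by CPL opcode.
 */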

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}
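
/*
 * Reference pairing: start_ep_timer() takes a reference on the ep
 * unless it is only restarting an already-pending timer, and
 * stop_ep_timer() drops it, so an endpoint cannot be freed while its
 * timer is armed.
 */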

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}
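
/*
 * On the constants above: 40 bytes covers the IPv4 (20) and TCP (20)
 * headers, and 12 bytes the TCP timestamp option, so emss is the usable
 * payload per segment for the chosen MTU table entry, clamped to a
 * 128-byte floor.
 */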

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static inline void __state_set(struct iwch_ep_common *epc,
			       enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;
	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb) {
		BUG_ON(skb_cloned(skb));
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
		 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
		 }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
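
/*
 * d->mtus[] is sorted ascending, so this returns the index of the
 * largest MTU table entry not exceeding the path MTU; it is the index,
 * not the MTU itself, that is programmed via V_MSS_IDX() below.
 */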

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
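
/*
 * A note on the option words above (a sketch, not a full register
 * reference): compute_wscale() picks a window-scale shift so rcv_win
 * fits TCP's 16-bit window field, V_RCV_BUFSIZ() is expressed in 1KB
 * units (hence rcv_win >> 10), and F_TCAM_BYPASS tells the adapter to
 * use the supplied L2T entry rather than a TCAM lookup.
 */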

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb->end) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}
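
/*
 * For reference, the MPA start frame built above looks roughly like
 * this on the wire (struct mpa_message is defined in iwch_cm.h; layout
 * sketched here per the MPA spec, not authoritative):
 *
 *	char	key[16];	"MPA ID Req Frame" / "MPA ID Rep Frame"
 *	u8	flags;		MPA_CRC | MPA_MARKERS | MPA_REJECT
 *	u8	revision;
 *	__be16	private_data_size;
 *	u8	private_data[];	up to MPA_MAX_PRIVATE_DATA bytes
 *
 * It is sent as ordinary TCP stream payload wrapped in a tx_data_wr;
 * the connection only enters RDMA (FPDU) mode once negotiation
 * completes.
 */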

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}
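
/*
 * Active-open flow so far: send_connect() went out under a provisional
 * atid; CPL_ACT_ESTABLISH trades that in for the hardware tid
 * (cxgb3_insert_tid()/cxgb3_free_atid()) and kicks off MPA negotiation,
 * reusing the CPL skb for the MPA request.
 */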

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}
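
/*
 * While MPA negotiation runs in TCP streaming mode, bytes the adapter
 * delivers must be explicitly returned to it as RX credits or the
 * offloaded receive window stops opening; rx_data() below does this by
 * passing the consumed length back through update_rx_credits().
 */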

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.  And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
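
/*
 * Both parsers above accumulate into ep->mpa_pkt rather than assuming
 * one complete frame per CPL_RX_DATA: TCP may segment the MPA start
 * frame arbitrarily, so each chunk is appended and the header and
 * private data are validated only once enough bytes have arrived.
 */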

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask mask;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ird;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
		       IWCH_QP_ATTR_MPA_ATTR |
		       IWCH_QP_ATTR_MAX_IRD |
		       IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
						 ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}
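
/*
 * The BUG_ON(credits != 1) above relies on there being at most one
 * streaming-mode send outstanding (the single MPA frame, posted with
 * F_TX_IMM_ACK), so the adapter should report exactly one WR credit
 * back; this is an invariant of this driver, not a general CPL_WR_ACK
 * guarantee.
 */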

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		start_ep_timer(ep);
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static inline int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
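
/*
 * "Negative advice" means the adapter's retransmit or persist timer
 * fired: the peer looks unresponsive, but the connection is not
 * necessarily gone.  peer_abort() treats these as hints and merely
 * stimulates the L2T entry (t3_l2t_send_event()) instead of tearing
 * the connection down.
 */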
1468 | |||
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		break;
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		/* fall through */
	case FPDU_MODE:
	case CLOSING:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	ep->com.tdev->send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}

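/*
 * Handle CPL_CLOSE_CON_RPL: the hardware has acknowledged our
 * half-close.  If the peer had already closed (MORIBUND) the connection
 * is fully down, so move the QP to IDLE, deliver the close complete
 * upcall and release the endpoint; otherwise arm the timer and wait in
 * MORIBUND for the peer's close.
 */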
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		start_ep_timer(ep);
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumption by the ULP.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

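/*
 * CPL_RDMA_EC_STATUS reports the hardware's verdict on a graceful
 * close.  A non-zero status means the close failed ("BAD CLOSE"), so
 * the QP is forced into ERROR and the connection aborted.
 */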
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

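/*
 * Timer callback, armed by start_ep_timer().  Firing means the peer did
 * not complete the MPA exchange or the close handshake within
 * ep_timeout_secs: fail any pending connect, move a bound QP to ERROR,
 * and abort the connection.
 */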
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

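/*
 * The ULP rejects an incoming connection request.  With mpa_rev 0 there
 * is no reject message on the wire, so just abort; otherwise send an
 * MPA reply flagged as a reject, carrying any private data, and start a
 * graceful half-close.
 */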
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	state_set(&ep->com, CLOSING);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = send_halfclose(ep, GFP_KERNEL);
	}
	return 0;
}

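/*
 * The ULP accepts an incoming connection request: validate the
 * requested IRD/ORD against the device limits, send the MPA reply, then
 * bind the QP to this endpoint and move it to RTS in a single
 * iwch_modify_qp() call.  Any failure aborts the connection and drops
 * the cm_id reference.
 */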
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
	get_ep(&ep->com);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		put_ep(&ep->com);
		return err;
	}

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	put_ep(&ep->com);
	return err;
}

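/*
 * Active open.  Allocate an endpoint and an active TID, resolve a route
 * and an L2T entry to the peer, then issue the connect request.  Setup
 * continues asynchronously in act_open_rpl()/act_establish().
 */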
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	put_ep(&ep->com);
out:
	return err;
}

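/*
 * Start a listening endpoint: allocate a server TID, start the listen,
 * and sleep until pass_open_rpl() posts the result via
 * rpl_done/rpl_err.
 */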
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	put_ep(&ep->com);
fail1:
out:
	return err;
}

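/*
 * Tear down a listening endpoint: stop the listen, sleep until
 * close_listsrv_rpl() signals completion, then free the server TID and
 * drop the references.
 */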
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

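/*
 * Start tearing down a connection.  abrupt != 0 requests an ABORT,
 * anything else begins a graceful half-close; the normal progression is
 * CLOSING -> MORIBUND -> DEAD.  The state changes under ep->com.lock,
 * but the CPL itself is sent after the lock is dropped since the send
 * path allocates an skb with the caller's gfp and may sleep.
 */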
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		start_ep_timer(ep);
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}

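/*
 * Called on a route change (via the netevent machinery): if this
 * endpoint was using the old dst, switch it over to the new dst and L2T
 * entry.  Returns nonzero when the endpoint was redirected.
 */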
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

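/*
 * Module init: every CPL of interest is first routed through sched(),
 * which defers it to the matching work_handlers[] entry on the
 * single-threaded iw_cxgb3 workqueue, so all CM processing runs in
 * process context.
 */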
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}