-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c  2610
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h   753
2 files changed, 3363 insertions, 0 deletions

diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
new file mode 100644
index 000000000000..db9d08a831d0
--- /dev/null
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -0,0 +1,2610 @@
/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <linux/inetdevice.h>	/* ip_dev_find */
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libcxgbi debug level (default=0)");


/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
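
/*
 * Illustrative note (not part of the original source): the register call
 * above hands out one contiguous allocation laid out as
 *
 *	[struct cxgbi_device][nports x net_device *][nports x cxgbi_hba *]
 *	[extra bytes of dd_data]
 *
 * which is why cdev->ports, cdev->hbas and cdev->dd_data are derived by
 * pointer arithmetic instead of separate allocations.
 */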

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			log_debug(1 << CXGBI_DBG_DEV,
				"cdev 0x%p, p# %u,%s.\n",
				cdev, cdev->nports, cdev->nports ?
					cdev->ports[0]->name : "");
			list_del(&cdev->list_head);
			cxgbi_device_destroy(cdev);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
							int *port)
{
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		ndev = vlan_dev_real_dev(ndev);

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}

struct cxgbi_hba *cxgbi_hba_find_by_netdev(struct net_device *dev,
					   struct cxgbi_device *cdev)
{
	int i;

	if (dev->priv_flags & IFF_802_1Q_VLAN)
		dev = vlan_dev_real_dev(dev);

	for (i = 0; i < cdev->nports; i++) {
		if (cdev->hbas[i]->ndev == dev)
			return cdev->hbas[i];
	}
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, cdev 0x%p, NO match found.\n",
		dev, dev->name, cdev);
	return NULL;
}

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		   unsigned int max_id, struct scsi_host_template *sht,
		   struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
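
/*
 * Illustrative example (numbers are made up, not from the driver): with
 * sport_base = 20000 and max_connect = 64, a connection that claims free
 * rotor slot idx = 5 gets source port 20000 + 5 = 20005 (stored in
 * network byte order), and sock_put_port() later recovers the slot as
 * 20005 - 20000 = 5.
 */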
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	if (csk->saddr.sin_port) {
		int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;

		csk->saddr.sin_port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(csk->saddr.sin_port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = tos }
			},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport }
			}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;

	return rt;
}

static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	ndev = dst->neighbour->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst->neighbour->dev->name, ndev->name, mtu);
	}

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		ndev = vlan_dev_real_dev(ndev);
		pr_info("rt dev %s, vlan -> %s.\n",
			dst->neighbour->dev->name, ndev->name);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	if (cdev->hbas[port]->ipv4addr)
		csk->saddr.sin_addr.s_addr = cdev->hbas[port]->ipv4addr;
	else
		csk->saddr.sin_addr.s_addr = rt->rt_src;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}

void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			    unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_CONN_FAILED);
		read_unlock(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);

static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);

void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);

void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);

void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
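
/*
 * Illustrative note (not part of the original source): the ack handler
 * above relies on skb->csum being reused on the write-request queue as a
 * per-WR credit count; a fully-acked WR is dequeued and freed, while a
 * partial ACK parks the remainder via p->csum -= credits and stops.
 */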

static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric(dst, RTAX_ADVMSS);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
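
/*
 * Illustrative example (assuming the usual 40 bytes of IPv4 + TCP
 * headers): for a path MTU of 1500, cxgbi_sock_select_mss() clamps
 * advmss to at most 1500 - 40 = 1460, and cxgbi_sock_find_best_mtu()
 * then returns the index of the largest cdev->mtus[] entry that does
 * not exceed 1460 + 40 = 1500.
 */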

void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);

static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}

/*
 * Direct Data Placement -
 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into ddp tag which is used as
 * the base for ITT/TTT.
 */
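
/*
 * Illustrative sketch of the tag layout described above (the encoding
 * helpers live in libcxgbi.h): a ddp tag is the caller's sw tag with the
 * pagepod index spliced into the reserved bit field,
 *
 *	tag = cxgbi_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT);
 *	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;	-- decode
 *
 * exactly as done in ddp_tag_reserve() and ddp_tag_release() below.
 */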

static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;

/*
 * Direct-Data Placement page size adjustment
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}

static int ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	pr_info("ddp page size %lu not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

static void ddp_setup_host_page_size(void)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
			if (ddp_adjust_page_table() < 0) {
				pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
				return;
			}
			page_idx = ddp_find_page_index(PAGE_SIZE);
		}
		pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
	}
}
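
/*
 * Illustrative example: on a 4KB-page host the default table above is
 * used unchanged (page_idx = 0).  On a hypothetical 64KB-page host,
 * ddp_adjust_page_table() rebuilds the shifts as PAGE_SHIFT + i, i.e.
 * {16, 17, 18, 19}, so the first entry always equals the kernel page
 * size and the remaining entries keep doubling.
 */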

void cxgbi_ddp_page_size_factor(int *pgsz_factor)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++)
		pgsz_factor[i] = ddp_page_order[i];
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);

/*
 * DDP setup & teardown
 */

void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
			struct cxgbi_pagepod_hdr *hdr,
			struct cxgbi_gather_list *gl, unsigned int gidx)
{
	int i;

	memcpy(ppod, hdr, sizeof(*hdr));
	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
		ppod->addr[i] = gidx < gl->nelem ?
			cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);

void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);

static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					  unsigned int start, unsigned int max,
					  unsigned int count,
					  struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}
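
/*
 * Illustrative example of the first-fit scan above: with gl_map
 * [G, 0, 0, G, 0, 0, 0] and count = 3, the attempt at i = 1 hits the
 * busy slot at k = 3 after j = 2 free ones, the scan skips ahead to
 * i = 1 + j + 1 = 4, and the run at slots 4..6 succeeds, returning 4.
 */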

static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
		count * sizeof(struct cxgbi_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;
unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}

static void ddp_release_gl(struct cxgbi_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}

static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
					     struct scatterlist *sgl,
					     unsigned int sgcnt,
					     struct pci_dev *pdev,
					     gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
				PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
			       sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j++;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
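
/*
 * Illustrative example of the ddp fitness rules above (4KB pages
 * assumed): the gather list may start mid-page, but every later chunk
 * must begin at page offset 0 and every non-final chunk must end on a
 * page boundary.  So 2KB@offset 2048 + 4KB@0 + 1KB@0 is usable, while
 * a chunk of 4KB@offset 1024 followed by anything is rejected.
 */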

static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}

static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
						npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
						ddp->nppods, npods,
						gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
						npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	if (cdev->csk_ddp_alloc_gl_skb) {
		err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
		if (err < 0)
			goto unmark_entries;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0) {
		if (cdev->csk_ddp_free_gl_skb)
			cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
		goto unmark_entries;
	}

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}

int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
		      unsigned int sw_tag, unsigned int xferlen,
		      struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_gather_list *gl;
	int err;

	if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
	    xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
		return -EINVAL;
	}

	if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
		log_debug(1 << CXGBI_DBG_DDP,
			"sw_tag 0x%x NOT usable.\n", sw_tag);
		return -EINVAL;
	}

	gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
	if (!gl)
		return -ENOMEM;

	err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
	if (err < 0)
		ddp_release_gl(gl, cdev->pdev);

	return err;
}

static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			if (cdev->csk_ddp_free_gl_skb)
				cdev->csk_ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}

int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
	cdev->ddp = NULL;
	if (ddp)
		return kref_put(&ddp->refcnt, ddp_destroy);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);

int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
				ppmax * sizeof(struct cxgbi_gather_list *));
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
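
/*
 * Illustrative example (PPOD_SIZE_SHIFT and PPOD_IDX_MAX_SIZE come from
 * libcxgbi.h; the region size here is made up): a 4MB pagepod region
 * gives ppmax = 4MB >> 6 = 65536 64-byte pagepods, so
 * bits = ilog2(65536) + 1 = 17, and ppmax is then trimmed to
 * (1 << 16) - 1 = 65535 indexable entries.
 */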

/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];

static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		ddp_tag_release(chba, tag);
}

static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt needs to be sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}

void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	u32 tag = ntohl((__force u32) itt);
	u32 sw_bits;

	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
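
/*
 * Illustrative example (task_idx_bits = 12 assumed): task_reserve_itt()
 * above packs sw_tag = (sess->age << 12) | task->itt, so age = 2 and
 * itt = 7 yield sw bits 0x2007; the parse above then recovers
 * idx = 0x2007 & 0xfff = 7 and age = (0x2007 >> 12) & ISCSI_AGE_MASK = 2.
 */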

void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);

/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
1596 | */ | ||
1597 | iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); | ||
1598 | return -EFAULT; | ||
1599 | case ISCSI_TCP_SEGMENT_DONE: | ||
1600 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1601 | "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", | ||
1602 | skb, offset, offloaded, bytes_read); | ||
1603 | return bytes_read; | ||
1604 | default: | ||
1605 | pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", | ||
1606 | skb, offset, offloaded, status); | ||
1607 | return -EINVAL; | ||
1608 | } | ||
1609 | } | ||
1610 | |||
1611 | static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) | ||
1612 | { | ||
1613 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1614 | |||
1615 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1616 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", | ||
1617 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); | ||
1618 | |||
1619 | if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { | ||
1620 | pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); | ||
1621 | iscsi_conn_failure(conn, ISCSI_ERR_PROTO); | ||
1622 | return -EIO; | ||
1623 | } | ||
1624 | |||
1625 | if (conn->hdrdgst_en && | ||
1626 | cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { | ||
1627 | pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); | ||
1628 | iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); | ||
1629 | return -EIO; | ||
1630 | } | ||
1631 | |||
1632 | return read_pdu_skb(conn, skb, 0, 0); | ||
1633 | } | ||
1634 | |||
1635 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, | ||
1636 | struct sk_buff *skb, unsigned int offset) | ||
1637 | { | ||
1638 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1639 | 	bool offloaded = false; | ||
1640 | int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; | ||
1641 | |||
1642 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1643 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", | ||
1644 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); | ||
1645 | |||
1646 | if (conn->datadgst_en && | ||
1647 | cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { | ||
1648 | pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", | ||
1649 | conn, lskb, cxgbi_skcb_flags(lskb)); | ||
1650 | iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); | ||
1651 | return -EIO; | ||
1652 | } | ||
1653 | |||
1654 | if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) | ||
1655 | return 0; | ||
1656 | |||
1657 | /* coalesced, add header digest length */ | ||
1658 | if (lskb == skb && conn->hdrdgst_en) | ||
1659 | offset += ISCSI_DIGEST_SIZE; | ||
1660 | |||
1661 | if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) | ||
1662 | offloaded = 1; | ||
1663 | |||
1664 | if (opcode == ISCSI_OP_SCSI_DATA_IN) | ||
1665 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1666 | "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", | ||
1667 | skb, opcode, ntohl(tcp_conn->in.hdr->itt), | ||
1668 | tcp_conn->in.datalen, offloaded ? "is" : "not"); | ||
1669 | |||
1670 | return read_pdu_skb(conn, skb, offset, offloaded); | ||
1671 | } | ||
1672 | |||
1673 | static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) | ||
1674 | { | ||
1675 | struct cxgbi_device *cdev = csk->cdev; | ||
1676 | int must_send; | ||
1677 | u32 credits; | ||
1678 | |||
1679 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1680 | "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", | ||
1681 | csk, csk->state, csk->flags, csk->tid, csk->copied_seq, | ||
1682 | csk->rcv_wup, cdev->rx_credit_thres, | ||
1683 | cdev->rcv_win); | ||
1684 | |||
1685 | if (csk->state != CTP_ESTABLISHED) | ||
1686 | return; | ||
1687 | |||
1688 | credits = csk->copied_seq - csk->rcv_wup; | ||
1689 | if (unlikely(!credits)) | ||
1690 | return; | ||
1691 | if (unlikely(cdev->rx_credit_thres == 0)) | ||
1692 | return; | ||
1693 | |||
1694 | must_send = credits + 16384 >= cdev->rcv_win; | ||
1695 | if (must_send || credits >= cdev->rx_credit_thres) | ||
1696 | csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); | ||
1697 | } | ||
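A worked instance of the threshold logic above, with assumed window parameters (not taken from any adapter): credits accumulate as copied_seq advances past rcv_wup, and are returned either once they cross rx_credit_thres or once the advertised window is within 16KB of running dry.

/* sketch with assumed values: rcv_win = 262144, rx_credit_thres = 10240 */
static int should_return_credits(unsigned int copied_seq, unsigned int rcv_wup)
{
	unsigned int credits = copied_seq - rcv_wup;	/* e.g. 20000 */
	int must_send = credits + 16384 >= 262144;	/* 0: window not tight */

	return must_send || credits >= 10240;		/* 1: threshold crossed */
}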
1698 | |||
1699 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) | ||
1700 | { | ||
1701 | struct cxgbi_device *cdev = csk->cdev; | ||
1702 | struct iscsi_conn *conn = csk->user_data; | ||
1703 | struct sk_buff *skb; | ||
1704 | unsigned int read = 0; | ||
1705 | int err = 0; | ||
1706 | |||
1707 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1708 | "csk 0x%p, conn 0x%p.\n", csk, conn); | ||
1709 | |||
1710 | if (unlikely(!conn || conn->suspend_rx)) { | ||
1711 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1712 | "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", | ||
1713 | csk, conn, conn ? conn->id : 0xFF, | ||
1714 | conn ? conn->suspend_rx : 0xFF); | ||
1716 | return; | ||
1717 | } | ||
1718 | |||
1719 | while (!err) { | ||
1720 | read_lock(&csk->callback_lock); | ||
1721 | skb = skb_peek(&csk->receive_queue); | ||
1722 | if (!skb || | ||
1723 | !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { | ||
1724 | if (skb) | ||
1725 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1726 | "skb 0x%p, NOT ready 0x%lx.\n", | ||
1727 | skb, cxgbi_skcb_flags(skb)); | ||
1728 | read_unlock(&csk->callback_lock); | ||
1729 | break; | ||
1730 | } | ||
1731 | __skb_unlink(skb, &csk->receive_queue); | ||
1732 | read_unlock(&csk->callback_lock); | ||
1733 | |||
1734 | read += cxgbi_skcb_rx_pdulen(skb); | ||
1735 | log_debug(1 << CXGBI_DBG_PDU_RX, | ||
1736 | "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", | ||
1737 | csk, skb, skb->len, cxgbi_skcb_flags(skb), | ||
1738 | cxgbi_skcb_rx_pdulen(skb)); | ||
1739 | |||
1740 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { | ||
1741 | err = skb_read_pdu_bhs(conn, skb); | ||
1742 | if (err < 0) | ||
1743 | break; | ||
1744 | err = skb_read_pdu_data(conn, skb, skb, | ||
1745 | err + cdev->skb_rx_extra); | ||
1746 | } else { | ||
1747 | err = skb_read_pdu_bhs(conn, skb); | ||
1748 | if (err < 0) | ||
1749 | break; | ||
1750 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { | ||
1751 | struct sk_buff *dskb; | ||
1752 | |||
1753 | read_lock(&csk->callback_lock); | ||
1754 | dskb = skb_peek(&csk->receive_queue); | ||
1755 | if (!dskb) { | ||
1756 | read_unlock(&csk->callback_lock); | ||
1757 | pr_err("csk 0x%p, NO data.\n", csk); | ||
1758 | err = -EAGAIN; | ||
1759 | break; | ||
1760 | } | ||
1761 | __skb_unlink(dskb, &csk->receive_queue); | ||
1762 | read_unlock(&csk->callback_lock); | ||
1763 | |||
1764 | err = skb_read_pdu_data(conn, skb, dskb, 0); | ||
1765 | __kfree_skb(dskb); | ||
1766 | } else | ||
1767 | err = skb_read_pdu_data(conn, skb, skb, 0); | ||
1768 | } | ||
1769 | if (err < 0) | ||
1770 | break; | ||
1771 | |||
1772 | __kfree_skb(skb); | ||
1773 | } | ||
1774 | |||
1775 | log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); | ||
1776 | if (read) { | ||
1777 | csk->copied_seq += read; | ||
1778 | csk_return_rx_credits(csk, read); | ||
1779 | conn->rxdata_octets += read; | ||
1780 | } | ||
1781 | |||
1782 | if (err < 0) { | ||
1783 | pr_info("csk 0x%p, 0x%p, rx failed %d.\n", csk, conn, err); | ||
1784 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
1785 | } | ||
1786 | } | ||
1787 | EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); | ||
1788 | |||
1789 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, | ||
1790 | unsigned int offset, unsigned int *off, | ||
1791 | struct scatterlist **sgp) | ||
1792 | { | ||
1793 | int i; | ||
1794 | struct scatterlist *sg; | ||
1795 | |||
1796 | for_each_sg(sgl, sg, sgcnt, i) { | ||
1797 | if (offset < sg->length) { | ||
1798 | *off = offset; | ||
1799 | *sgp = sg; | ||
1800 | return 0; | ||
1801 | } | ||
1802 | offset -= sg->length; | ||
1803 | } | ||
1804 | return -EFAULT; | ||
1805 | } | ||
1806 | |||
1807 | static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, | ||
1808 | unsigned int dlen, skb_frag_t *frags, | ||
1809 | int frag_max) | ||
1810 | { | ||
1811 | unsigned int datalen = dlen; | ||
1812 | unsigned int sglen = sg->length - sgoffset; | ||
1813 | struct page *page = sg_page(sg); | ||
1814 | int i; | ||
1815 | |||
1816 | i = 0; | ||
1817 | do { | ||
1818 | unsigned int copy; | ||
1819 | |||
1820 | if (!sglen) { | ||
1821 | sg = sg_next(sg); | ||
1822 | if (!sg) { | ||
1823 | pr_warn("sg %d NULL, len %u/%u.\n", | ||
1824 | i, datalen, dlen); | ||
1825 | return -EINVAL; | ||
1826 | } | ||
1827 | sgoffset = 0; | ||
1828 | sglen = sg->length; | ||
1829 | page = sg_page(sg); | ||
1830 | |||
1831 | } | ||
1832 | copy = min(datalen, sglen); | ||
1833 | if (i && page == frags[i - 1].page && | ||
1834 | sgoffset + sg->offset == | ||
1835 | frags[i - 1].page_offset + frags[i - 1].size) { | ||
1836 | frags[i - 1].size += copy; | ||
1837 | } else { | ||
1838 | if (i >= frag_max) { | ||
1839 | pr_warn("too many pages %u, dlen %u.\n", | ||
1840 | frag_max, dlen); | ||
1841 | return -EINVAL; | ||
1842 | } | ||
1843 | |||
1844 | frags[i].page = page; | ||
1845 | frags[i].page_offset = sg->offset + sgoffset; | ||
1846 | frags[i].size = copy; | ||
1847 | i++; | ||
1848 | } | ||
1849 | datalen -= copy; | ||
1850 | sgoffset += copy; | ||
1851 | sglen -= copy; | ||
1852 | } while (datalen); | ||
1853 | |||
1854 | return i; | ||
1855 | } | ||
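The merge test above grows the previous frag whenever the next chunk continues it on the same page, so physically contiguous scatterlist entries do not consume extra frag slots. A stand-alone model of that rule (simplified types, not the kernel's skb_frag_t):

struct frag { void *page; unsigned int off, size; };

/* returns the frag count after adding one chunk of a gather list */
static int add_chunk(struct frag *f, int i, void *page,
		     unsigned int off, unsigned int len)
{
	if (i && page == f[i - 1].page &&
	    off == f[i - 1].off + f[i - 1].size) {
		f[i - 1].size += len;	/* continues the previous frag: merge */
		return i;
	}
	f[i].page = page;		/* otherwise open a new frag */
	f[i].off = off;
	f[i].size = len;
	return i + 1;
}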
1856 | |||
1857 | int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | ||
1858 | { | ||
1859 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | ||
1860 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
1861 | struct cxgbi_device *cdev = cconn->chba->cdev; | ||
1862 | struct iscsi_conn *conn = task->conn; | ||
1863 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
1864 | struct cxgbi_task_data *tdata = task->dd_data + sizeof(*tcp_task); | ||
1865 | struct scsi_cmnd *sc = task->sc; | ||
1866 | int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; | ||
1867 | |||
1868 | tcp_task->dd_data = tdata; | ||
1869 | task->hdr = NULL; | ||
1870 | |||
1871 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && | ||
1872 | (opcode == ISCSI_OP_SCSI_DATA_OUT || | ||
1873 | (opcode == ISCSI_OP_SCSI_CMD && | ||
1874 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) | ||
1875 | 		/* data could go into the skb head */ | ||
1876 | headroom += min_t(unsigned int, | ||
1877 | SKB_MAX_HEAD(cdev->skb_tx_rsvd), | ||
1878 | conn->max_xmit_dlength); | ||
1879 | |||
1880 | tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); | ||
1881 | if (!tdata->skb) { | ||
1882 | pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", | ||
1883 | cdev->skb_tx_rsvd, headroom, opcode); | ||
1884 | return -ENOMEM; | ||
1885 | } | ||
1886 | |||
1887 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); | ||
1888 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; | ||
1889 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ | ||
1890 | |||
1891 | /* data_out uses scsi_cmd's itt */ | ||
1892 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) | ||
1893 | task_reserve_itt(task, &task->hdr->itt); | ||
1894 | |||
1895 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | ||
1896 | "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", | ||
1897 | task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, | ||
1898 | conn->max_xmit_dlength, ntohl(task->hdr->itt)); | ||
1899 | |||
1900 | return 0; | ||
1901 | } | ||
1902 | EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); | ||
1903 | |||
1904 | static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) | ||
1905 | { | ||
1906 | u8 submode = 0; | ||
1907 | |||
1908 | if (hcrc) | ||
1909 | submode |= 1; | ||
1910 | if (dcrc) | ||
1911 | submode |= 2; | ||
1912 | cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; | ||
1913 | } | ||
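The submode byte set here is what cxgbi_ulp_extra_len() in libcxgbi.h later keys off to account for the digest bytes the HW inserts on the wire; a small sketch of that correspondence:

/* submode bit 0 = header CRC, bit 1 = data CRC */
static unsigned int wire_digest_len(int hcrc, int dcrc)
{
	static const unsigned int extra_len[] = { 0, 4, 4, 8 };
	unsigned int submode = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	return extra_len[submode];	/* hcrc+dcrc -> 8 extra bytes */
}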
1914 | |||
1915 | int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, | ||
1916 | unsigned int count) | ||
1917 | { | ||
1918 | struct iscsi_conn *conn = task->conn; | ||
1919 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
1920 | struct cxgbi_task_data *tdata = tcp_task->dd_data; | ||
1921 | struct sk_buff *skb = tdata->skb; | ||
1922 | unsigned int datalen = count; | ||
1923 | int i, padlen = iscsi_padding(count); | ||
1924 | struct page *pg; | ||
1925 | |||
1926 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | ||
1927 | "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", | ||
1928 | task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, | ||
1929 | ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); | ||
1930 | |||
1931 | skb_put(skb, task->hdr_len); | ||
1932 | tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); | ||
1933 | if (!count) | ||
1934 | return 0; | ||
1935 | |||
1936 | if (task->sc) { | ||
1937 | struct scsi_data_buffer *sdb = scsi_out(task->sc); | ||
1938 | struct scatterlist *sg = NULL; | ||
1939 | int err; | ||
1940 | |||
1941 | tdata->offset = offset; | ||
1942 | tdata->count = count; | ||
1943 | err = sgl_seek_offset( | ||
1944 | sdb->table.sgl, sdb->table.nents, | ||
1945 | tdata->offset, &tdata->sgoffset, &sg); | ||
1946 | if (err < 0) { | ||
1947 | pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", | ||
1948 | sdb->table.nents, tdata->offset, sdb->length); | ||
1949 | return err; | ||
1950 | } | ||
1951 | err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, | ||
1952 | tdata->frags, MAX_PDU_FRAGS); | ||
1953 | if (err < 0) { | ||
1954 | pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", | ||
1955 | sdb->table.nents, tdata->offset, tdata->count); | ||
1956 | return err; | ||
1957 | } | ||
1958 | tdata->nr_frags = err; | ||
1959 | |||
1960 | if (tdata->nr_frags > MAX_SKB_FRAGS || | ||
1961 | (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { | ||
1962 | char *dst = skb->data + task->hdr_len; | ||
1963 | skb_frag_t *frag = tdata->frags; | ||
1964 | |||
1965 | 			/* too many frags: copy the data into the skb's headroom */ | ||
1966 | for (i = 0; i < tdata->nr_frags; i++, frag++) { | ||
1967 | char *src = kmap_atomic(frag->page, | ||
1968 | KM_SOFTIRQ0); | ||
1969 | |||
1970 | memcpy(dst, src+frag->page_offset, frag->size); | ||
1971 | dst += frag->size; | ||
1972 | kunmap_atomic(src, KM_SOFTIRQ0); | ||
1973 | } | ||
1974 | if (padlen) { | ||
1975 | memset(dst, 0, padlen); | ||
1976 | padlen = 0; | ||
1977 | } | ||
1978 | skb_put(skb, count + padlen); | ||
1979 | } else { | ||
1980 | 			/* data fits into the skb's frags[] */ | ||
1981 | for (i = 0; i < tdata->nr_frags; i++) | ||
1982 | get_page(tdata->frags[i].page); | ||
1983 | |||
1984 | memcpy(skb_shinfo(skb)->frags, tdata->frags, | ||
1985 | sizeof(skb_frag_t) * tdata->nr_frags); | ||
1986 | skb_shinfo(skb)->nr_frags = tdata->nr_frags; | ||
1987 | skb->len += count; | ||
1988 | skb->data_len += count; | ||
1989 | skb->truesize += count; | ||
1990 | } | ||
1991 | |||
1992 | } else { | ||
1993 | pg = virt_to_page(task->data); | ||
1994 | |||
1995 | get_page(pg); | ||
1996 | skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), | ||
1997 | count); | ||
1998 | skb->len += count; | ||
1999 | skb->data_len += count; | ||
2000 | skb->truesize += count; | ||
2001 | } | ||
2002 | |||
2003 | if (padlen) { | ||
2004 | 		i = skb_shinfo(skb)->nr_frags; | ||
2005 | 		skb_fill_page_desc(skb, i, | ||
2006 | 				virt_to_page(padding), offset_in_page(padding), | ||
2007 | 				padlen); | ||
2008 | |||
2009 | skb->data_len += padlen; | ||
2010 | skb->truesize += padlen; | ||
2011 | skb->len += padlen; | ||
2012 | } | ||
2013 | |||
2014 | return 0; | ||
2015 | } | ||
2016 | EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); | ||
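The padlen used above comes from iscsi_padding(), which pads the data segment to the iSCSI 4-byte boundary; a sketch of that rule, assuming the usual pad-to-4 definition:

static inline unsigned int pad4(unsigned int len)
{
	return -len & 3;	/* 1024 -> 0, 1025 -> 3, 1027 -> 1 */
}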
2017 | |||
2018 | int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | ||
2019 | { | ||
2020 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | ||
2021 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
2022 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
2023 | struct cxgbi_task_data *tdata = tcp_task->dd_data; | ||
2024 | struct sk_buff *skb = tdata->skb; | ||
2025 | unsigned int datalen; | ||
2026 | int err; | ||
2027 | |||
2028 | if (!skb) { | ||
2029 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | ||
2030 | "task 0x%p, skb NULL.\n", task); | ||
2031 | return 0; | ||
2032 | } | ||
2033 | |||
2034 | datalen = skb->data_len; | ||
2035 | tdata->skb = NULL; | ||
2036 | err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); | ||
2037 | if (err > 0) { | ||
2038 | int pdulen = err; | ||
2039 | |||
2040 | log_debug(1 << CXGBI_DBG_PDU_TX, | ||
2041 | "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", | ||
2042 | task, task->sc, skb, skb->len, skb->data_len, err); | ||
2043 | |||
2044 | if (task->conn->hdrdgst_en) | ||
2045 | pdulen += ISCSI_DIGEST_SIZE; | ||
2046 | |||
2047 | if (datalen && task->conn->datadgst_en) | ||
2048 | pdulen += ISCSI_DIGEST_SIZE; | ||
2049 | |||
2050 | task->conn->txdata_octets += pdulen; | ||
2051 | return 0; | ||
2052 | } | ||
2053 | |||
2054 | if (err == -EAGAIN || err == -ENOBUFS) { | ||
2055 | log_debug(1 << CXGBI_DBG_PDU_TX, | ||
2056 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", | ||
2057 | task, skb, skb->len, skb->data_len, err); | ||
2058 | /* reset skb to send when we are called again */ | ||
2059 | tdata->skb = skb; | ||
2060 | return err; | ||
2061 | } | ||
2062 | |||
2063 | kfree_skb(skb); | ||
2064 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | ||
2065 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", | ||
2066 | task->itt, skb, skb->len, skb->data_len, err); | ||
2067 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); | ||
2068 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); | ||
2069 | return err; | ||
2070 | } | ||
2071 | EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); | ||
2072 | |||
2073 | void cxgbi_cleanup_task(struct iscsi_task *task) | ||
2074 | { | ||
2075 | struct cxgbi_task_data *tdata = task->dd_data + | ||
2076 | sizeof(struct iscsi_tcp_task); | ||
2077 | |||
2078 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2079 | "task 0x%p, skb 0x%p, itt 0x%x.\n", | ||
2080 | task, tdata->skb, task->hdr_itt); | ||
2081 | |||
2082 | /* never reached the xmit task callout */ | ||
2083 | if (tdata->skb) | ||
2084 | __kfree_skb(tdata->skb); | ||
2085 | memset(tdata, 0, sizeof(*tdata)); | ||
2086 | |||
2087 | task_release_itt(task, task->hdr_itt); | ||
2088 | iscsi_tcp_cleanup_task(task); | ||
2089 | } | ||
2090 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); | ||
2091 | |||
2092 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, | ||
2093 | struct iscsi_stats *stats) | ||
2094 | { | ||
2095 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
2096 | |||
2097 | stats->txdata_octets = conn->txdata_octets; | ||
2098 | stats->rxdata_octets = conn->rxdata_octets; | ||
2099 | stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; | ||
2100 | stats->dataout_pdus = conn->dataout_pdus_cnt; | ||
2101 | stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; | ||
2102 | stats->datain_pdus = conn->datain_pdus_cnt; | ||
2103 | stats->r2t_pdus = conn->r2t_pdus_cnt; | ||
2104 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; | ||
2105 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; | ||
2106 | stats->digest_err = 0; | ||
2107 | stats->timeout_err = 0; | ||
2108 | stats->custom_length = 1; | ||
2109 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); | ||
2110 | stats->custom[0].value = conn->eh_abort_cnt; | ||
2111 | } | ||
2112 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); | ||
2113 | |||
2114 | static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) | ||
2115 | { | ||
2116 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2117 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
2118 | struct cxgbi_device *cdev = cconn->chba->cdev; | ||
2119 | unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); | ||
2120 | unsigned int max_def = 512 * MAX_SKB_FRAGS; | ||
2121 | unsigned int max = max(max_def, headroom); | ||
2122 | |||
2123 | max = min(cconn->chba->cdev->tx_max_size, max); | ||
2124 | if (conn->max_xmit_dlength) | ||
2125 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); | ||
2126 | else | ||
2127 | conn->max_xmit_dlength = max; | ||
2128 | cxgbi_align_pdu_size(conn->max_xmit_dlength); | ||
2129 | |||
2130 | return 0; | ||
2131 | } | ||
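A worked instance of the sizing above, under assumed values (MAX_SKB_FRAGS = 18 on 4KB pages, tx_max_size = 16384, headroom = 4096, and no limit negotiated yet); the numbers are illustrative only:

/* max_def = 512 * 18 = 9216 */
static unsigned int example_xmit_dlength(void)
{
	unsigned int max = 9216;		/* max(9216, 4096) */

	max = max < 16384 ? max : 16384;	/* min(tx_max_size, max) */
	return max & ~511;			/* cxgbi_align_pdu_size -> 9216 */
}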
2132 | |||
2133 | static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) | ||
2134 | { | ||
2135 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2136 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
2137 | unsigned int max = cconn->chba->cdev->rx_max_size; | ||
2138 | |||
2139 | cxgbi_align_pdu_size(max); | ||
2140 | |||
2141 | if (conn->max_recv_dlength) { | ||
2142 | if (conn->max_recv_dlength > max) { | ||
2143 | pr_err("MaxRecvDataSegmentLength %u > %u.\n", | ||
2144 | conn->max_recv_dlength, max); | ||
2145 | return -EINVAL; | ||
2146 | } | ||
2147 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); | ||
2148 | cxgbi_align_pdu_size(conn->max_recv_dlength); | ||
2149 | } else | ||
2150 | conn->max_recv_dlength = max; | ||
2151 | |||
2152 | return 0; | ||
2153 | } | ||
2154 | |||
2155 | int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | ||
2156 | enum iscsi_param param, char *buf, int buflen) | ||
2157 | { | ||
2158 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
2159 | struct iscsi_session *session = conn->session; | ||
2160 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2161 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
2162 | struct cxgbi_sock *csk = cconn->cep->csk; | ||
2163 | int value, err = 0; | ||
2164 | |||
2165 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2166 | "cls_conn 0x%p, param %d, buf(%d) %s.\n", | ||
2167 | cls_conn, param, buflen, buf); | ||
2168 | |||
2169 | switch (param) { | ||
2170 | case ISCSI_PARAM_HDRDGST_EN: | ||
2171 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
2172 | if (!err && conn->hdrdgst_en) | ||
2173 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | ||
2174 | conn->hdrdgst_en, | ||
2175 | conn->datadgst_en, 0); | ||
2176 | break; | ||
2177 | case ISCSI_PARAM_DATADGST_EN: | ||
2178 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
2179 | if (!err && conn->datadgst_en) | ||
2180 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | ||
2181 | conn->hdrdgst_en, | ||
2182 | conn->datadgst_en, 0); | ||
2183 | break; | ||
2184 | case ISCSI_PARAM_MAX_R2T: | ||
2185 | sscanf(buf, "%d", &value); | ||
2186 | if (value <= 0 || !is_power_of_2(value)) | ||
2187 | return -EINVAL; | ||
2188 | if (session->max_r2t == value) | ||
2189 | break; | ||
2190 | iscsi_tcp_r2tpool_free(session); | ||
2191 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
2192 | if (!err && iscsi_tcp_r2tpool_alloc(session)) | ||
2193 | 			return -ENOMEM; | ||
2193 | 		break; | ||
2194 | 	case ISCSI_PARAM_MAX_RECV_DLENGTH: | ||
2195 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
2196 | if (!err) | ||
2197 | err = cxgbi_conn_max_recv_dlength(conn); | ||
2198 | break; | ||
2199 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: | ||
2200 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
2201 | if (!err) | ||
2202 | err = cxgbi_conn_max_xmit_dlength(conn); | ||
2203 | break; | ||
2204 | default: | ||
2205 | return iscsi_set_param(cls_conn, param, buf, buflen); | ||
2206 | } | ||
2207 | return err; | ||
2208 | } | ||
2209 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); | ||
2210 | |||
2211 | int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, | ||
2212 | enum iscsi_param param, char *buf) | ||
2213 | { | ||
2214 | struct iscsi_conn *iconn = cls_conn->dd_data; | ||
2215 | int len; | ||
2216 | |||
2217 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2218 | "cls_conn 0x%p, param %d.\n", cls_conn, param); | ||
2219 | |||
2220 | switch (param) { | ||
2221 | case ISCSI_PARAM_CONN_PORT: | ||
2222 | spin_lock_bh(&iconn->session->lock); | ||
2223 | len = sprintf(buf, "%hu\n", iconn->portal_port); | ||
2224 | spin_unlock_bh(&iconn->session->lock); | ||
2225 | break; | ||
2226 | case ISCSI_PARAM_CONN_ADDRESS: | ||
2227 | spin_lock_bh(&iconn->session->lock); | ||
2228 | len = sprintf(buf, "%s\n", iconn->portal_address); | ||
2229 | spin_unlock_bh(&iconn->session->lock); | ||
2230 | break; | ||
2231 | default: | ||
2232 | return iscsi_conn_get_param(cls_conn, param, buf); | ||
2233 | } | ||
2234 | return len; | ||
2235 | } | ||
2236 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); | ||
2237 | |||
2238 | struct iscsi_cls_conn * | ||
2239 | cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) | ||
2240 | { | ||
2241 | struct iscsi_cls_conn *cls_conn; | ||
2242 | struct iscsi_conn *conn; | ||
2243 | struct iscsi_tcp_conn *tcp_conn; | ||
2244 | struct cxgbi_conn *cconn; | ||
2245 | |||
2246 | cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); | ||
2247 | if (!cls_conn) | ||
2248 | return NULL; | ||
2249 | |||
2250 | conn = cls_conn->dd_data; | ||
2251 | tcp_conn = conn->dd_data; | ||
2252 | cconn = tcp_conn->dd_data; | ||
2253 | cconn->iconn = conn; | ||
2254 | |||
2255 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2256 | "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", | ||
2257 | cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); | ||
2258 | |||
2259 | return cls_conn; | ||
2260 | } | ||
2261 | EXPORT_SYMBOL_GPL(cxgbi_create_conn); | ||
2262 | |||
2263 | int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, | ||
2264 | struct iscsi_cls_conn *cls_conn, | ||
2265 | u64 transport_eph, int is_leading) | ||
2266 | { | ||
2267 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
2268 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2269 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | ||
2270 | struct iscsi_endpoint *ep; | ||
2271 | struct cxgbi_endpoint *cep; | ||
2272 | struct cxgbi_sock *csk; | ||
2273 | int err; | ||
2274 | |||
2275 | ep = iscsi_lookup_endpoint(transport_eph); | ||
2276 | if (!ep) | ||
2277 | return -EINVAL; | ||
2278 | |||
2279 | /* setup ddp pagesize */ | ||
2280 | cep = ep->dd_data; | ||
2281 | csk = cep->csk; | ||
2282 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); | ||
2283 | if (err < 0) | ||
2284 | return err; | ||
2285 | |||
2286 | err = iscsi_conn_bind(cls_session, cls_conn, is_leading); | ||
2287 | if (err) | ||
2288 | return -EINVAL; | ||
2289 | |||
2290 | /* calculate the tag idx bits needed for this conn based on cmds_max */ | ||
2291 | cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; | ||
2292 | |||
2293 | write_lock(&csk->callback_lock); | ||
2294 | csk->user_data = conn; | ||
2295 | cconn->chba = cep->chba; | ||
2296 | cconn->cep = cep; | ||
2297 | cep->cconn = cconn; | ||
2298 | write_unlock(&csk->callback_lock); | ||
2299 | |||
2300 | cxgbi_conn_max_xmit_dlength(conn); | ||
2301 | cxgbi_conn_max_recv_dlength(conn); | ||
2302 | |||
2303 | spin_lock_bh(&conn->session->lock); | ||
2304 | sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr); | ||
2305 | conn->portal_port = ntohs(csk->daddr.sin_port); | ||
2306 | spin_unlock_bh(&conn->session->lock); | ||
2307 | |||
2308 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2309 | "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", | ||
2310 | cls_session, cls_conn, ep, cconn, csk); | ||
2311 | /* init recv engine */ | ||
2312 | iscsi_tcp_hdr_recv_prep(tcp_conn); | ||
2313 | |||
2314 | return 0; | ||
2315 | } | ||
2316 | EXPORT_SYMBOL_GPL(cxgbi_bind_conn); | ||
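The task_idx_bits computation above sizes the tag's index field to the session's queue depth: __ilog2_u32(cmds_max - 1) + 1 is the smallest bit count whose range covers cmds_max task slots. An equivalent stand-alone sketch:

static unsigned int task_idx_bits(unsigned int cmds_max)
{
	unsigned int bits = 0;

	while ((1u << bits) < cmds_max)	/* smallest n with 2^n >= cmds_max */
		bits++;
	return bits;			/* 128 -> 7, 129 -> 8 */
}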
2317 | |||
2318 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, | ||
2319 | u16 cmds_max, u16 qdepth, | ||
2320 | u32 initial_cmdsn) | ||
2321 | { | ||
2322 | struct cxgbi_endpoint *cep; | ||
2323 | struct cxgbi_hba *chba; | ||
2324 | struct Scsi_Host *shost; | ||
2325 | struct iscsi_cls_session *cls_session; | ||
2326 | struct iscsi_session *session; | ||
2327 | |||
2328 | if (!ep) { | ||
2329 | pr_err("missing endpoint.\n"); | ||
2330 | return NULL; | ||
2331 | } | ||
2332 | |||
2333 | cep = ep->dd_data; | ||
2334 | chba = cep->chba; | ||
2335 | shost = chba->shost; | ||
2336 | |||
2337 | BUG_ON(chba != iscsi_host_priv(shost)); | ||
2338 | |||
2339 | cls_session = iscsi_session_setup(chba->cdev->itp, shost, | ||
2340 | cmds_max, 0, | ||
2341 | sizeof(struct iscsi_tcp_task) + | ||
2342 | sizeof(struct cxgbi_task_data), | ||
2343 | initial_cmdsn, ISCSI_MAX_TARGET); | ||
2344 | if (!cls_session) | ||
2345 | return NULL; | ||
2346 | |||
2347 | session = cls_session->dd_data; | ||
2348 | if (iscsi_tcp_r2tpool_alloc(session)) | ||
2349 | goto remove_session; | ||
2350 | |||
2351 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2352 | "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); | ||
2353 | return cls_session; | ||
2354 | |||
2355 | remove_session: | ||
2356 | iscsi_session_teardown(cls_session); | ||
2357 | return NULL; | ||
2358 | } | ||
2359 | EXPORT_SYMBOL_GPL(cxgbi_create_session); | ||
2360 | |||
2361 | void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) | ||
2362 | { | ||
2363 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2364 | "cls sess 0x%p.\n", cls_session); | ||
2365 | |||
2366 | iscsi_tcp_r2tpool_free(cls_session->dd_data); | ||
2367 | iscsi_session_teardown(cls_session); | ||
2368 | } | ||
2369 | EXPORT_SYMBOL_GPL(cxgbi_destroy_session); | ||
2370 | |||
2371 | int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, | ||
2372 | char *buf, int buflen) | ||
2373 | { | ||
2374 | struct cxgbi_hba *chba = iscsi_host_priv(shost); | ||
2375 | |||
2376 | if (!chba->ndev) { | ||
2377 | 		shost_printk(KERN_ERR, shost, "Could not set host param. " | ||
2378 | "netdev for host not set.\n"); | ||
2379 | return -ENODEV; | ||
2380 | } | ||
2381 | |||
2382 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2383 | "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", | ||
2384 | shost, chba, chba->ndev->name, param, buflen, buf); | ||
2385 | |||
2386 | switch (param) { | ||
2387 | case ISCSI_HOST_PARAM_IPADDRESS: | ||
2388 | { | ||
2389 | __be32 addr = in_aton(buf); | ||
2390 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2391 | "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); | ||
2392 | cxgbi_set_iscsi_ipv4(chba, addr); | ||
2393 | return 0; | ||
2394 | } | ||
2395 | case ISCSI_HOST_PARAM_HWADDRESS: | ||
2396 | case ISCSI_HOST_PARAM_NETDEV_NAME: | ||
2397 | return 0; | ||
2398 | default: | ||
2399 | return iscsi_host_set_param(shost, param, buf, buflen); | ||
2400 | } | ||
2401 | } | ||
2402 | EXPORT_SYMBOL_GPL(cxgbi_set_host_param); | ||
2403 | |||
2404 | int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, | ||
2405 | char *buf) | ||
2406 | { | ||
2407 | struct cxgbi_hba *chba = iscsi_host_priv(shost); | ||
2408 | int len = 0; | ||
2409 | |||
2410 | if (!chba->ndev) { | ||
2411 | shost_printk(KERN_ERR, shost, "Could not get host param. " | ||
2412 | "netdev for host not set.\n"); | ||
2413 | return -ENODEV; | ||
2414 | } | ||
2415 | |||
2416 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2417 | "shost 0x%p, hba 0x%p,%s, param %d.\n", | ||
2418 | shost, chba, chba->ndev->name, param); | ||
2419 | |||
2420 | switch (param) { | ||
2421 | case ISCSI_HOST_PARAM_HWADDRESS: | ||
2422 | len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); | ||
2423 | break; | ||
2424 | case ISCSI_HOST_PARAM_NETDEV_NAME: | ||
2425 | len = sprintf(buf, "%s\n", chba->ndev->name); | ||
2426 | break; | ||
2427 | case ISCSI_HOST_PARAM_IPADDRESS: | ||
2428 | { | ||
2429 | __be32 addr; | ||
2430 | |||
2431 | addr = cxgbi_get_iscsi_ipv4(chba); | ||
2432 | len = sprintf(buf, "%pI4", &addr); | ||
2433 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2434 | "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); | ||
2435 | break; | ||
2436 | } | ||
2437 | default: | ||
2438 | return iscsi_host_get_param(shost, param, buf); | ||
2439 | } | ||
2440 | |||
2441 | return len; | ||
2442 | } | ||
2443 | EXPORT_SYMBOL_GPL(cxgbi_get_host_param); | ||
2444 | |||
2445 | struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, | ||
2446 | struct sockaddr *dst_addr, | ||
2447 | int non_blocking) | ||
2448 | { | ||
2449 | struct iscsi_endpoint *ep; | ||
2450 | struct cxgbi_endpoint *cep; | ||
2451 | struct cxgbi_hba *hba = NULL; | ||
2452 | struct cxgbi_sock *csk; | ||
2453 | int err = -EINVAL; | ||
2454 | |||
2455 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, | ||
2456 | "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", | ||
2457 | shost, non_blocking, dst_addr); | ||
2458 | |||
2459 | if (shost) { | ||
2460 | hba = iscsi_host_priv(shost); | ||
2461 | if (!hba) { | ||
2462 | pr_info("shost 0x%p, priv NULL.\n", shost); | ||
2463 | goto err_out; | ||
2464 | } | ||
2465 | } | ||
2466 | |||
2467 | csk = cxgbi_check_route(dst_addr); | ||
2468 | if (IS_ERR(csk)) | ||
2469 | return (struct iscsi_endpoint *)csk; | ||
2470 | cxgbi_sock_get(csk); | ||
2471 | |||
2472 | if (!hba) | ||
2473 | hba = csk->cdev->hbas[csk->port_id]; | ||
2474 | else if (hba != csk->cdev->hbas[csk->port_id]) { | ||
2475 | 		pr_info("Could not connect through requested host %u, " | ||
2476 | 			"hba 0x%p != 0x%p (%u).\n", | ||
2477 | shost->host_no, hba, | ||
2478 | csk->cdev->hbas[csk->port_id], csk->port_id); | ||
2479 | err = -ENOSPC; | ||
2480 | goto release_conn; | ||
2481 | } | ||
2482 | |||
2483 | err = sock_get_port(csk); | ||
2484 | if (err) | ||
2485 | goto release_conn; | ||
2486 | |||
2487 | cxgbi_sock_set_state(csk, CTP_CONNECTING); | ||
2488 | err = csk->cdev->csk_init_act_open(csk); | ||
2489 | if (err) | ||
2490 | goto release_conn; | ||
2491 | |||
2492 | if (cxgbi_sock_is_closing(csk)) { | ||
2493 | err = -ENOSPC; | ||
2494 | pr_info("csk 0x%p is closing.\n", csk); | ||
2495 | goto release_conn; | ||
2496 | } | ||
2497 | |||
2498 | ep = iscsi_create_endpoint(sizeof(*cep)); | ||
2499 | if (!ep) { | ||
2500 | err = -ENOMEM; | ||
2501 | pr_info("iscsi alloc ep, OOM.\n"); | ||
2502 | goto release_conn; | ||
2503 | } | ||
2504 | |||
2505 | cep = ep->dd_data; | ||
2506 | cep->csk = csk; | ||
2507 | cep->chba = hba; | ||
2508 | |||
2509 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, | ||
2510 | "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", | ||
2511 | ep, cep, csk, hba, hba->ndev->name); | ||
2512 | return ep; | ||
2513 | |||
2514 | release_conn: | ||
2515 | cxgbi_sock_put(csk); | ||
2516 | cxgbi_sock_closed(csk); | ||
2517 | err_out: | ||
2518 | return ERR_PTR(err); | ||
2519 | } | ||
2520 | EXPORT_SYMBOL_GPL(cxgbi_ep_connect); | ||
2521 | |||
2522 | int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | ||
2523 | { | ||
2524 | struct cxgbi_endpoint *cep = ep->dd_data; | ||
2525 | struct cxgbi_sock *csk = cep->csk; | ||
2526 | |||
2527 | if (!cxgbi_sock_is_established(csk)) | ||
2528 | return 0; | ||
2529 | return 1; | ||
2530 | } | ||
2531 | EXPORT_SYMBOL_GPL(cxgbi_ep_poll); | ||
2532 | |||
2533 | void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) | ||
2534 | { | ||
2535 | struct cxgbi_endpoint *cep = ep->dd_data; | ||
2536 | struct cxgbi_conn *cconn = cep->cconn; | ||
2537 | struct cxgbi_sock *csk = cep->csk; | ||
2538 | |||
2539 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, | ||
2540 | "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", | ||
2541 | ep, cep, cconn, csk, csk->state, csk->flags); | ||
2542 | |||
2543 | if (cconn && cconn->iconn) { | ||
2544 | iscsi_suspend_tx(cconn->iconn); | ||
2545 | write_lock_bh(&csk->callback_lock); | ||
2546 | cep->csk->user_data = NULL; | ||
2547 | cconn->cep = NULL; | ||
2548 | write_unlock_bh(&csk->callback_lock); | ||
2549 | } | ||
2550 | iscsi_destroy_endpoint(ep); | ||
2551 | |||
2552 | if (likely(csk->state >= CTP_ESTABLISHED)) | ||
2553 | need_active_close(csk); | ||
2554 | else | ||
2555 | cxgbi_sock_closed(csk); | ||
2556 | |||
2557 | cxgbi_sock_put(csk); | ||
2558 | } | ||
2559 | EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); | ||
2560 | |||
2561 | int cxgbi_iscsi_init(struct iscsi_transport *itp, | ||
2562 | struct scsi_transport_template **stt) | ||
2563 | { | ||
2564 | *stt = iscsi_register_transport(itp); | ||
2565 | if (*stt == NULL) { | ||
2566 | pr_err("unable to register %s transport 0x%p.\n", | ||
2567 | itp->name, itp); | ||
2568 | return -ENODEV; | ||
2569 | } | ||
2570 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2571 | "%s, registered iscsi transport 0x%p.\n", | ||
2572 | itp->name, stt); | ||
2573 | return 0; | ||
2574 | } | ||
2575 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); | ||
2576 | |||
2577 | void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, | ||
2578 | struct scsi_transport_template **stt) | ||
2579 | { | ||
2580 | if (*stt) { | ||
2581 | log_debug(1 << CXGBI_DBG_ISCSI, | ||
2582 | "de-register transport 0x%p, %s, stt 0x%p.\n", | ||
2583 | itp, itp->name, *stt); | ||
2584 | *stt = NULL; | ||
2585 | iscsi_unregister_transport(itp); | ||
2586 | } | ||
2587 | } | ||
2588 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); | ||
2589 | |||
2590 | static int __init libcxgbi_init_module(void) | ||
2591 | { | ||
2592 | sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; | ||
2593 | sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; | ||
2594 | |||
2595 | pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", | ||
2596 | ISCSI_ITT_MASK, sw_tag_idx_bits, | ||
2597 | ISCSI_AGE_MASK, sw_tag_age_bits); | ||
2598 | |||
2599 | ddp_setup_host_page_size(); | ||
2600 | return 0; | ||
2601 | } | ||
2602 | |||
2603 | static void __exit libcxgbi_exit_module(void) | ||
2604 | { | ||
2605 | cxgbi_device_unregister_all(0xFF); | ||
2607 | } | ||
2608 | |||
2609 | module_init(libcxgbi_init_module); | ||
2610 | module_exit(libcxgbi_exit_module); | ||
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h new file mode 100644 index 000000000000..40551f3be5dc --- /dev/null +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
@@ -0,0 +1,753 @@ | |||
1 | /* | ||
2 | * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. | ||
3 | * | ||
4 | * Copyright (c) 2010 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) | ||
12 | */ | ||
13 | |||
14 | #ifndef __LIBCXGBI_H__ | ||
15 | #define __LIBCXGBI_H__ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/if_vlan.h> | ||
24 | #include <linux/scatterlist.h> | ||
25 | #include <linux/skbuff.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <scsi/scsi_device.h> | ||
28 | #include <scsi/libiscsi_tcp.h> | ||
29 | |||
30 | enum cxgbi_dbg_flag { | ||
31 | CXGBI_DBG_ISCSI, | ||
32 | CXGBI_DBG_DDP, | ||
33 | CXGBI_DBG_TOE, | ||
34 | CXGBI_DBG_SOCK, | ||
35 | |||
36 | CXGBI_DBG_PDU_TX, | ||
37 | CXGBI_DBG_PDU_RX, | ||
38 | CXGBI_DBG_DEV, | ||
39 | }; | ||
40 | |||
41 | #define log_debug(level, fmt, ...) \ | ||
42 | do { \ | ||
43 | if (dbg_level & (level)) \ | ||
44 | pr_info(fmt, ##__VA_ARGS__); \ | ||
45 | } while (0) | ||
46 | |||
47 | /* max. connections per adapter */ | ||
48 | #define CXGBI_MAX_CONN 16384 | ||
49 | |||
50 | /* always allocate room for AHS */ | ||
51 | #define SKB_TX_ISCSI_PDU_HEADER_MAX \ | ||
52 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) | ||
53 | |||
54 | #define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */ | ||
55 | |||
56 | /* | ||
57 | * align pdu size to multiple of 512 for better performance | ||
58 | */ | ||
59 | #define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) | ||
60 | |||
61 | #define ULP2_MODE_ISCSI 2 | ||
62 | |||
63 | #define ULP2_MAX_PKT_SIZE 16224 | ||
64 | #define ULP2_MAX_PDU_PAYLOAD \ | ||
65 | (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) | ||
66 | |||
67 | /* | ||
68 | * For iSCSI connections, the HW may insert digest bytes into the pdu. Those | ||
69 | * bytes are not sent by the host but are part of the TCP payload and therefore | ||
70 | * consume TCP sequence space. | ||
71 | */ | ||
72 | static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 }; | ||
73 | static inline unsigned int cxgbi_ulp_extra_len(int submode) | ||
74 | { | ||
75 | return ulp2_extra_len[submode & 3]; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * struct pagepod_hdr, pagepod - pagepod format | ||
80 | */ | ||
81 | |||
82 | #define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ | ||
83 | #define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ | ||
84 | #define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ | ||
85 | #define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ | ||
86 | |||
87 | struct cxgbi_pagepod_hdr { | ||
88 | u32 vld_tid; | ||
89 | u32 pgsz_tag_clr; | ||
90 | u32 max_offset; | ||
91 | u32 page_offset; | ||
92 | u64 rsvd; | ||
93 | }; | ||
94 | |||
95 | #define PPOD_PAGES_MAX 4 | ||
96 | struct cxgbi_pagepod { | ||
97 | struct cxgbi_pagepod_hdr hdr; | ||
98 | u64 addr[PPOD_PAGES_MAX + 1]; | ||
99 | }; | ||
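The addr[] array holds one entry beyond PPOD_PAGES_MAX: the extra fifth slot repeats the first page of the following pod so a PDU that straddles a pod boundary can still be placed. Pod count for a gather list then follows directly; a sketch using PPOD_PAGES_SHIFT defined further below:

/* pods needed to map an n-page gather list, 4 data pages per pod */
static inline unsigned int pages_to_pods(unsigned int npages)
{
	return (npages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
}
/* e.g. a 64KB buffer on 4KB pages: 16 pages -> 4 pods */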
100 | |||
101 | struct cxgbi_tag_format { | ||
102 | unsigned char sw_bits; | ||
103 | unsigned char rsvd_bits; | ||
104 | unsigned char rsvd_shift; | ||
105 | unsigned char filler[1]; | ||
106 | u32 rsvd_mask; | ||
107 | }; | ||
108 | |||
109 | struct cxgbi_gather_list { | ||
110 | unsigned int tag; | ||
111 | unsigned int length; | ||
112 | unsigned int offset; | ||
113 | unsigned int nelem; | ||
114 | struct page **pages; | ||
115 | dma_addr_t phys_addr[0]; | ||
116 | }; | ||
117 | |||
118 | struct cxgbi_ddp_info { | ||
119 | struct kref refcnt; | ||
120 | struct cxgbi_device *cdev; | ||
121 | struct pci_dev *pdev; | ||
122 | unsigned int max_txsz; | ||
123 | unsigned int max_rxsz; | ||
124 | unsigned int llimit; | ||
125 | unsigned int ulimit; | ||
126 | unsigned int nppods; | ||
127 | unsigned int idx_last; | ||
128 | unsigned char idx_bits; | ||
129 | unsigned char filler[3]; | ||
130 | unsigned int idx_mask; | ||
131 | unsigned int rsvd_tag_mask; | ||
132 | spinlock_t map_lock; | ||
133 | struct cxgbi_gather_list **gl_map; | ||
134 | struct sk_buff **gl_skb; | ||
135 | }; | ||
136 | |||
137 | #define DDP_PGIDX_MAX 4 | ||
138 | #define DDP_THRESHOLD 2048 | ||
139 | |||
140 | #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ | ||
141 | |||
142 | #define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ | ||
143 | #define PPOD_SIZE_SHIFT 6 | ||
144 | |||
145 | #define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ | ||
146 | #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ | ||
147 | #define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ | ||
148 | |||
149 | #define PPOD_COLOR_SHIFT 0 | ||
150 | #define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) | ||
151 | |||
152 | #define PPOD_IDX_SHIFT 6 | ||
153 | #define PPOD_IDX_MAX_SIZE 24 | ||
154 | |||
155 | #define PPOD_TID_SHIFT 0 | ||
156 | #define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) | ||
157 | |||
158 | #define PPOD_TAG_SHIFT 6 | ||
159 | #define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) | ||
160 | |||
161 | #define PPOD_VALID_SHIFT 24 | ||
162 | #define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) | ||
163 | #define PPOD_VALID_FLAG PPOD_VALID(1U) | ||
164 | |||
165 | #define W_TCB_ULP_TYPE 0 | ||
166 | #define TCB_ULP_TYPE_SHIFT 0 | ||
167 | #define TCB_ULP_TYPE_MASK 0xfULL | ||
168 | #define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT) | ||
169 | |||
170 | #define W_TCB_ULP_RAW 0 | ||
171 | #define TCB_ULP_RAW_SHIFT 4 | ||
172 | #define TCB_ULP_RAW_MASK 0xffULL | ||
173 | #define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT) | ||
174 | |||
175 | /* | ||
176 | * sge_opaque_hdr - | ||
177 | * Opaque version of structure the SGE stores at skb->head of TX_DATA packets | ||
178 | * and for which we must reserve space. | ||
179 | */ | ||
180 | struct sge_opaque_hdr { | ||
181 | void *dev; | ||
182 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
183 | }; | ||
184 | |||
185 | struct cxgbi_sock { | ||
186 | struct cxgbi_device *cdev; | ||
187 | |||
188 | int tid; | ||
189 | int atid; | ||
190 | unsigned long flags; | ||
191 | unsigned int mtu; | ||
192 | unsigned short rss_qid; | ||
193 | unsigned short txq_idx; | ||
194 | unsigned short advmss; | ||
195 | unsigned int tx_chan; | ||
196 | unsigned int rx_chan; | ||
197 | unsigned int mss_idx; | ||
198 | unsigned int smac_idx; | ||
199 | unsigned char port_id; | ||
200 | int wr_max_cred; | ||
201 | int wr_cred; | ||
202 | int wr_una_cred; | ||
203 | unsigned char hcrc_len; | ||
204 | unsigned char dcrc_len; | ||
205 | |||
206 | void *l2t; | ||
207 | struct sk_buff *wr_pending_head; | ||
208 | struct sk_buff *wr_pending_tail; | ||
209 | struct sk_buff *cpl_close; | ||
210 | struct sk_buff *cpl_abort_req; | ||
211 | struct sk_buff *cpl_abort_rpl; | ||
212 | struct sk_buff *skb_ulp_lhdr; | ||
213 | spinlock_t lock; | ||
214 | struct kref refcnt; | ||
215 | unsigned int state; | ||
216 | struct sockaddr_in saddr; | ||
217 | struct sockaddr_in daddr; | ||
218 | struct dst_entry *dst; | ||
219 | struct sk_buff_head receive_queue; | ||
220 | struct sk_buff_head write_queue; | ||
221 | struct timer_list retry_timer; | ||
222 | int err; | ||
223 | rwlock_t callback_lock; | ||
224 | void *user_data; | ||
225 | |||
226 | u32 rcv_nxt; | ||
227 | u32 copied_seq; | ||
228 | u32 rcv_wup; | ||
229 | u32 snd_nxt; | ||
230 | u32 snd_una; | ||
231 | u32 write_seq; | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * connection states | ||
236 | */ | ||
237 | enum cxgbi_sock_states { | ||
238 | CTP_CLOSED, | ||
239 | CTP_CONNECTING, | ||
240 | CTP_ACTIVE_OPEN, | ||
241 | CTP_ESTABLISHED, | ||
242 | CTP_ACTIVE_CLOSE, | ||
243 | CTP_PASSIVE_CLOSE, | ||
244 | CTP_CLOSE_WAIT_1, | ||
245 | CTP_CLOSE_WAIT_2, | ||
246 | CTP_ABORTING, | ||
247 | }; | ||
248 | |||
249 | /* | ||
250 | * Connection flags -- many to track some close related events. | ||
251 | */ | ||
252 | enum cxgbi_sock_flags { | ||
253 | 	CTPF_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */ | ||
254 | 	CTPF_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */ | ||
255 | CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */ | ||
256 | CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */ | ||
257 | CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */ | ||
258 | CTPF_HAS_ATID, /* reserved atid */ | ||
259 | CTPF_HAS_TID, /* reserved hw tid */ | ||
260 | CTPF_OFFLOAD_DOWN, /* offload function off */ | ||
261 | }; | ||
262 | |||
263 | struct cxgbi_skb_rx_cb { | ||
264 | __u32 ddigest; | ||
265 | __u32 pdulen; | ||
266 | }; | ||
267 | |||
268 | struct cxgbi_skb_tx_cb { | ||
269 | void *l2t; | ||
270 | struct sk_buff *wr_next; | ||
271 | }; | ||
272 | |||
273 | enum cxgbi_skcb_flags { | ||
274 | SKCBF_TX_NEED_HDR, /* packet needs a header */ | ||
275 | SKCBF_RX_COALESCED, /* received whole pdu */ | ||
276 | 	SKCBF_RX_HDR,		/* received pdu header */ | ||
277 | 	SKCBF_RX_DATA,		/* received pdu payload */ | ||
278 | 	SKCBF_RX_STATUS,	/* received ddp status */ | ||
279 | SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ | ||
280 | SKCBF_RX_HCRC_ERR, /* header digest error */ | ||
281 | SKCBF_RX_DCRC_ERR, /* data digest error */ | ||
282 | SKCBF_RX_PAD_ERR, /* padding byte error */ | ||
283 | }; | ||
284 | |||
285 | struct cxgbi_skb_cb { | ||
286 | unsigned char ulp_mode; | ||
287 | unsigned long flags; | ||
288 | unsigned int seq; | ||
289 | union { | ||
290 | struct cxgbi_skb_rx_cb rx; | ||
291 | struct cxgbi_skb_tx_cb tx; | ||
292 | }; | ||
293 | }; | ||
294 | |||
295 | #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) | ||
296 | #define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags) | ||
297 | #define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode) | ||
298 | #define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq) | ||
299 | #define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest) | ||
300 | #define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen) | ||
301 | #define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next) | ||
302 | |||
303 | static inline void cxgbi_skcb_set_flag(struct sk_buff *skb, | ||
304 | enum cxgbi_skcb_flags flag) | ||
305 | { | ||
306 | __set_bit(flag, &(cxgbi_skcb_flags(skb))); | ||
307 | } | ||
308 | |||
309 | static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb, | ||
310 | enum cxgbi_skcb_flags flag) | ||
311 | { | ||
312 | __clear_bit(flag, &(cxgbi_skcb_flags(skb))); | ||
313 | } | ||
314 | |||
315 | static inline int cxgbi_skcb_test_flag(struct sk_buff *skb, | ||
316 | enum cxgbi_skcb_flags flag) | ||
317 | { | ||
318 | return test_bit(flag, &(cxgbi_skcb_flags(skb))); | ||
319 | } | ||
320 | |||
321 | static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk, | ||
322 | enum cxgbi_sock_flags flag) | ||
323 | { | ||
324 | __set_bit(flag, &csk->flags); | ||
325 | log_debug(1 << CXGBI_DBG_SOCK, | ||
326 | "csk 0x%p,%u,0x%lx, bit %d.\n", | ||
327 | csk, csk->state, csk->flags, flag); | ||
328 | } | ||
329 | |||
330 | static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk, | ||
331 | enum cxgbi_sock_flags flag) | ||
332 | { | ||
333 | __clear_bit(flag, &csk->flags); | ||
334 | log_debug(1 << CXGBI_DBG_SOCK, | ||
335 | "csk 0x%p,%u,0x%lx, bit %d.\n", | ||
336 | csk, csk->state, csk->flags, flag); | ||
337 | } | ||
338 | |||
339 | static inline int cxgbi_sock_flag(struct cxgbi_sock *csk, | ||
340 | enum cxgbi_sock_flags flag) | ||
341 | { | ||
342 | if (csk == NULL) | ||
343 | return 0; | ||
344 | return test_bit(flag, &csk->flags); | ||
345 | } | ||
346 | |||
347 | static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state) | ||
348 | { | ||
349 | log_debug(1 << CXGBI_DBG_SOCK, | ||
350 | "csk 0x%p,%u,0x%lx, state -> %u.\n", | ||
351 | csk, csk->state, csk->flags, state); | ||
352 | csk->state = state; | ||
353 | } | ||
354 | |||
355 | static inline void cxgbi_sock_free(struct kref *kref) | ||
356 | { | ||
357 | struct cxgbi_sock *csk = container_of(kref, | ||
358 | struct cxgbi_sock, | ||
359 | refcnt); | ||
360 | if (csk) { | ||
361 | log_debug(1 << CXGBI_DBG_SOCK, | ||
362 | "free csk 0x%p, state %u, flags 0x%lx\n", | ||
363 | csk, csk->state, csk->flags); | ||
364 | kfree(csk); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk) | ||
369 | { | ||
370 | log_debug(1 << CXGBI_DBG_SOCK, | ||
371 | "%s, put csk 0x%p, ref %u-1.\n", | ||
372 | fn, csk, atomic_read(&csk->refcnt.refcount)); | ||
373 | kref_put(&csk->refcnt, cxgbi_sock_free); | ||
374 | } | ||
375 | #define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk) | ||
376 | |||
377 | static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk) | ||
378 | { | ||
379 | log_debug(1 << CXGBI_DBG_SOCK, | ||
380 | "%s, get csk 0x%p, ref %u+1.\n", | ||
381 | fn, csk, atomic_read(&csk->refcnt.refcount)); | ||
382 | kref_get(&csk->refcnt); | ||
383 | } | ||
384 | #define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk) | ||
385 | |||
386 | static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk) | ||
387 | { | ||
388 | return csk->state >= CTP_ACTIVE_CLOSE; | ||
389 | } | ||
390 | |||
391 | static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk) | ||
392 | { | ||
393 | return csk->state == CTP_ESTABLISHED; | ||
394 | } | ||
395 | |||
396 | static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk) | ||
397 | { | ||
398 | struct sk_buff *skb; | ||
399 | |||
400 | while ((skb = __skb_dequeue(&csk->write_queue))) | ||
401 | __kfree_skb(skb); | ||
402 | } | ||
403 | |||
404 | static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win) | ||
405 | { | ||
406 | unsigned int wscale = 0; | ||
407 | |||
408 | while (wscale < 14 && (65535 << wscale) < win) | ||
409 | wscale++; | ||
410 | return wscale; | ||
411 | } | ||
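The loop picks the smallest RFC 1323 window-scale shift that lets the 16-bit window field cover the configured receive window, capped at 14; a usage sketch:

void wscale_demo(void)
{
	cxgbi_sock_compute_wscale(65535);	/* -> 0: fits unscaled */
	cxgbi_sock_compute_wscale(262144);	/* -> 3: 65535 << 2 still short */
	cxgbi_sock_compute_wscale(1 << 26);	/* -> 11 */
}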
412 | |||
413 | static inline struct sk_buff *alloc_cpl(int cpl_len, int dlen, gfp_t gfp) | ||
414 | { | ||
415 | int wrlen = roundup(cpl_len, 16); | ||
416 | struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp); | ||
417 | |||
418 | if (skb) { | ||
419 | __skb_put(skb, wrlen); | ||
420 | memset(skb->head, 0, wrlen + dlen); | ||
421 | } else | ||
422 | pr_info("alloc cpl skb %u+%u, OOM.\n", cpl_len, dlen); | ||
423 | return skb; | ||
424 | } | ||
425 | |||
426 | |||
427 | /* | ||
428 | * The number of WRs needed for an skb depends on the number of fragments | ||
429 | * in the skb and whether it has any payload in its main body. This maps the | ||
430 | * length of the gather list represented by an skb into the # of necessary WRs. | ||
431 | * The extra two fragments are for iscsi bhs and payload padding. | ||
432 | */ | ||
433 | #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) | ||
434 | |||
435 | static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk) | ||
436 | { | ||
437 | csk->wr_pending_head = csk->wr_pending_tail = NULL; | ||
438 | } | ||
439 | |||
440 | static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk, | ||
441 | struct sk_buff *skb) | ||
442 | { | ||
443 | cxgbi_skcb_tx_wr_next(skb) = NULL; | ||
444 | /* | ||
445 | 	 * We want to take an extra reference since both we and the driver | ||
446 | * need to free the packet before it's really freed. We know there's | ||
447 | * just one user currently so we use atomic_set rather than skb_get | ||
448 | * to avoid the atomic op. | ||
449 | */ | ||
450 | atomic_set(&skb->users, 2); | ||
451 | |||
452 | if (!csk->wr_pending_head) | ||
453 | csk->wr_pending_head = skb; | ||
454 | else | ||
455 | cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb; | ||
456 | csk->wr_pending_tail = skb; | ||
457 | } | ||
458 | |||
459 | static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk) | ||
460 | { | ||
461 | int n = 0; | ||
462 | const struct sk_buff *skb = csk->wr_pending_head; | ||
463 | |||
464 | while (skb) { | ||
465 | n += skb->csum; | ||
466 | skb = cxgbi_skcb_tx_wr_next(skb); | ||
467 | } | ||
468 | return n; | ||
469 | } | ||
470 | |||
471 | static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk) | ||
472 | { | ||
473 | return csk->wr_pending_head; | ||
474 | } | ||
475 | |||
476 | static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk) | ||
477 | { | ||
478 | struct sk_buff *skb = csk->wr_pending_head; | ||
479 | |||
480 | if (likely(skb)) { | ||
481 | csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb); | ||
482 | cxgbi_skcb_tx_wr_next(skb) = NULL; | ||
483 | } | ||
484 | return skb; | ||
485 | } | ||
486 | |||
487 | void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *); | ||
488 | void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *); | ||
489 | void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *); | ||
490 | void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int); | ||
491 | void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *); | ||
492 | void cxgbi_sock_closed(struct cxgbi_sock *); | ||
493 | void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int); | ||
494 | void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *); | ||
495 | void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *); | ||
496 | void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32); | ||
497 | void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int, | ||
498 | int); | ||
499 | unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int); | ||
500 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); | ||
501 | |||
502 | struct cxgbi_hba { | ||
503 | struct net_device *ndev; | ||
504 | struct Scsi_Host *shost; | ||
505 | struct cxgbi_device *cdev; | ||
506 | __be32 ipv4addr; | ||
507 | unsigned char port_id; | ||
508 | }; | ||
509 | |||
510 | struct cxgbi_ports_map { | ||
511 | unsigned int max_connect; | ||
512 | unsigned int used; | ||
513 | unsigned short sport_base; | ||
514 | spinlock_t lock; | ||
515 | unsigned int next; | ||
516 | struct cxgbi_sock **port_csk; | ||
517 | }; | ||
518 | |||
519 | #define CXGBI_FLAG_DEV_T3 0x1 | ||
520 | #define CXGBI_FLAG_DEV_T4 0x2 | ||
521 | #define CXGBI_FLAG_ADAPTER_RESET 0x4 | ||
522 | #define CXGBI_FLAG_IPV4_SET 0x10 | ||
523 | struct cxgbi_device { | ||
524 | struct list_head list_head; | ||
525 | unsigned int flags; | ||
526 | struct net_device **ports; | ||
527 | void *lldev; | ||
528 | struct cxgbi_hba **hbas; | ||
529 | const unsigned short *mtus; | ||
530 | unsigned char nmtus; | ||
531 | unsigned char nports; | ||
532 | struct pci_dev *pdev; | ||
533 | struct dentry *debugfs_root; | ||
534 | struct iscsi_transport *itp; | ||
535 | |||
536 | unsigned int pfvf; | ||
537 | unsigned int snd_win; | ||
538 | unsigned int rcv_win; | ||
539 | unsigned int rx_credit_thres; | ||
540 | unsigned int skb_tx_rsvd; | ||
541 | unsigned int skb_rx_extra; /* for msg coalesced mode */ | ||
542 | unsigned int tx_max_size; | ||
543 | unsigned int rx_max_size; | ||
544 | struct cxgbi_ports_map pmap; | ||
545 | struct cxgbi_tag_format tag_format; | ||
546 | struct cxgbi_ddp_info *ddp; | ||
547 | |||
548 | void (*dev_ddp_cleanup)(struct cxgbi_device *); | ||
549 | void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int); | ||
550 | int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t); | ||
551 | int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, | ||
552 | unsigned int, unsigned int, | ||
553 | struct cxgbi_gather_list *); | ||
554 | void (*csk_ddp_clear)(struct cxgbi_hba *, | ||
555 | unsigned int, unsigned int, unsigned int); | ||
556 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, | ||
557 | unsigned int, int, int, int); | ||
558 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, | ||
559 | unsigned int, int, bool); | ||
560 | |||
561 | void (*csk_release_offload_resources)(struct cxgbi_sock *); | ||
562 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); | ||
563 | u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32); | ||
564 | int (*csk_push_tx_frames)(struct cxgbi_sock *, int); | ||
565 | void (*csk_send_abort_req)(struct cxgbi_sock *); | ||
566 | void (*csk_send_close_req)(struct cxgbi_sock *); | ||
567 | int (*csk_alloc_cpls)(struct cxgbi_sock *); | ||
568 | int (*csk_init_act_open)(struct cxgbi_sock *); | ||
569 | |||
570 | void *dd_data; | ||
571 | }; | ||
572 | #define cxgbi_cdev_priv(cdev) ((cdev)->dd_data) | ||
573 | |||
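Everything above the dd_data member is filled in by the low-level driver (the T3 or T4 glue) before the device is used. A hedged sketch of that wiring, assuming cxgbi_device_register()'s two unnamed parameters are the private-data size and the port count; the my_* names are hypothetical LLD callbacks, not part of this library:

/* hypothetical LLD callbacks; stub bodies keep the sketch compilable */
static int my_alloc_cpls(struct cxgbi_sock *csk) { return 0; }
static int my_init_act_open(struct cxgbi_sock *csk) { return 0; }

static struct cxgbi_device *my_lld_attach_sketch(struct pci_dev *pdev,
						unsigned int nports)
{
	struct cxgbi_device *cdev;

	/* assumed: arg 1 is the dd_data size, arg 2 the port count */
	cdev = cxgbi_device_register(sizeof(void *), nports);
	if (!cdev)
		return NULL;

	cdev->pdev = pdev;
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->csk_alloc_cpls = my_alloc_cpls;
	cdev->csk_init_act_open = my_init_act_open;
	/* ... and likewise for the remaining csk_* and ddp hooks ... */
	return cdev;
}
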
574 | struct cxgbi_conn { | ||
575 | struct cxgbi_endpoint *cep; | ||
576 | struct iscsi_conn *iconn; | ||
577 | struct cxgbi_hba *chba; | ||
578 | u32 task_idx_bits; | ||
579 | }; | ||
580 | |||
581 | struct cxgbi_endpoint { | ||
582 | struct cxgbi_conn *cconn; | ||
583 | struct cxgbi_hba *chba; | ||
584 | struct cxgbi_sock *csk; | ||
585 | }; | ||
586 | |||
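The endpoint is the glue object handed back from ep_connect; binding a connection afterwards just cross-links the three objects. A minimal sketch, using only the fields shown above, of the pointer fixups cxgbi_bind_conn() (declared below) would be expected to perform:

static void cxgbi_bind_linkage_sketch(struct cxgbi_endpoint *cep,
				      struct cxgbi_conn *cconn,
				      struct iscsi_conn *iconn)
{
	cconn->iconn = iconn;		/* rx/tx events reach libiscsi */
	cconn->chba = cep->chba;	/* port the socket was opened on */
	cconn->cep = cep;
	cep->cconn = cconn;		/* endpoint can find the connection */
}
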
587 | #define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) | ||
588 | struct cxgbi_task_data { | ||
589 | unsigned short nr_frags; | ||
590 | skb_frag_t frags[MAX_PDU_FRAGS]; | ||
591 | struct sk_buff *skb; | ||
592 | unsigned int offset; | ||
593 | unsigned int count; | ||
594 | unsigned int sgoffset; | ||
595 | }; | ||
596 | |||
597 | static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) | ||
598 | { | ||
599 | return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); | ||
600 | } | ||
601 | |||
602 | static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, | ||
603 | u32 sw_tag) | ||
604 | { | ||
605 | sw_tag >>= (32 - tformat->rsvd_bits); | ||
606 | return !sw_tag; | ||
607 | } | ||
608 | |||
609 | static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, | ||
610 | u32 sw_tag) | ||
611 | { | ||
612 | unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; | ||
613 | u32 mask = (1 << shift) - 1; | ||
614 | |||
615 | if (sw_tag && (sw_tag & ~mask)) { | ||
616 | u32 v1 = sw_tag & mask; | ||
617 | u32 v2 = (sw_tag >> (shift - 1)) << shift; | ||
618 | |||
619 | return v2 | v1 | 1 << shift; | ||
620 | } | ||
621 | |||
622 | return sw_tag | 1 << shift; | ||
623 | } | ||
624 | |||
625 | static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, | ||
626 | u32 sw_tag) | ||
627 | { | ||
628 | u32 mask = (1 << tformat->rsvd_shift) - 1; | ||
629 | |||
630 | if (sw_tag && (sw_tag & ~mask)) { | ||
631 | u32 v1 = sw_tag & mask; | ||
632 | u32 v2 = sw_tag >> tformat->rsvd_shift; | ||
633 | |||
634 | v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; | ||
635 | |||
636 | return v2 | v1; | ||
637 | } | ||
638 | |||
639 | return sw_tag; | ||
640 | } | ||
641 | |||
642 | static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, | ||
643 | u32 tag) | ||
644 | { | ||
645 | if (cxgbi_is_ddp_tag(tformat, tag)) | ||
646 | return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat, | ||
652 | u32 tag) | ||
653 | { | ||
654 | unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; | ||
655 | u32 v1, v2; | ||
656 | |||
657 | if (cxgbi_is_ddp_tag(tformat, tag)) { | ||
658 | v1 = tag & ((1 << tformat->rsvd_shift) - 1); | ||
659 | v2 = (tag >> (shift + 1)) << tformat->rsvd_shift; | ||
660 | } else { | ||
661 | u32 mask = (1 << shift) - 1; | ||
662 | tag &= ~(1 << shift); | ||
663 | v1 = tag & mask; | ||
664 | v2 = (tag >> 1) & ~mask; | ||
665 | } | ||
666 | return v1 | v2; | ||
667 | } | ||
668 | |||
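A worked example may help here; the numbers are made up for illustration only. Assume rsvd_shift = 6, rsvd_bits = 10 and rsvd_mask = (1 << rsvd_bits) - 1, so the hardware-owned field is bits 6..15 and the non-DDP marker is bit 15 (shift = 6 + 10 - 1):

static void cxgbi_tag_example(struct cxgbi_tag_format *tformat)
{
	u32 sw_tag = 0x1234, tag;

	/* non-ddp: 0x1234 -> 0x9234 (marker bit 15 set), round trips */
	tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	BUG_ON(cxgbi_is_ddp_tag(tformat, tag));
	BUG_ON(cxgbi_tag_nonrsvd_bits(tformat, tag) != sw_tag);

	/* ddp: 0x1234 -> base 0x480034, sw bits split around bits 6..15 */
	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= 0x155 << tformat->rsvd_shift;	/* h/w-assigned ppod index */
	BUG_ON(!cxgbi_is_ddp_tag(tformat, tag));	/* tag is now 0x485574 */
	BUG_ON(cxgbi_tag_rsvd_bits(tformat, tag) != 0x155);
	BUG_ON(cxgbi_tag_nonrsvd_bits(tformat, tag) != sw_tag);
}
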
669 | static inline void *cxgbi_alloc_big_mem(unsigned int size, | ||
670 | gfp_t gfp) | ||
671 | { | ||
672 | void *p = kmalloc(size, gfp); | ||
673 | if (!p) | ||
674 | p = vmalloc(size); | ||
675 | if (p) | ||
676 | memset(p, 0, size); | ||
677 | return p; | ||
678 | } | ||
679 | |||
680 | static inline void cxgbi_free_big_mem(void *addr) | ||
681 | { | ||
682 | if (is_vmalloc_addr(addr)) | ||
683 | vfree(addr); | ||
684 | else | ||
685 | kfree(addr); | ||
686 | } | ||
687 | |||
688 | static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr) | ||
689 | { | ||
690 | if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET) | ||
691 | chba->ipv4addr = ipaddr; | ||
692 | else | ||
693 | pr_info("setting iscsi ipv4 NOT supported, using %s's ipv4 address.\n", | ||
694 | chba->ndev->name); | ||
695 | } | ||
696 | |||
697 | static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba) | ||
698 | { | ||
699 | return chba->ipv4addr; | ||
700 | } | ||
701 | |||
702 | struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); | ||
703 | void cxgbi_device_unregister(struct cxgbi_device *); | ||
704 | void cxgbi_device_unregister_all(unsigned int flag); | ||
705 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); | ||
706 | int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, | ||
707 | struct scsi_host_template *, | ||
708 | struct scsi_transport_template *); | ||
709 | void cxgbi_hbas_remove(struct cxgbi_device *); | ||
710 | |||
711 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, | ||
712 | unsigned int max_conn); | ||
713 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev); | ||
714 | |||
715 | void cxgbi_conn_tx_open(struct cxgbi_sock *); | ||
716 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *); | ||
717 | int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8); | ||
718 | int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int); | ||
719 | int cxgbi_conn_xmit_pdu(struct iscsi_task *); | ||
720 | |||
721 | void cxgbi_cleanup_task(struct iscsi_task *task); | ||
722 | |||
723 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); | ||
724 | int cxgbi_set_conn_param(struct iscsi_cls_conn *, | ||
725 | enum iscsi_param, char *, int); | ||
726 | int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); | ||
727 | struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32); | ||
728 | int cxgbi_bind_conn(struct iscsi_cls_session *, | ||
729 | struct iscsi_cls_conn *, u64, int); | ||
730 | void cxgbi_destroy_session(struct iscsi_cls_session *); | ||
731 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *, | ||
732 | u16, u16, u32); | ||
733 | int cxgbi_set_host_param(struct Scsi_Host *, | ||
734 | enum iscsi_host_param, char *, int); | ||
735 | int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *); | ||
736 | struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *, | ||
737 | struct sockaddr *, int); | ||
738 | int cxgbi_ep_poll(struct iscsi_endpoint *, int); | ||
739 | void cxgbi_ep_disconnect(struct iscsi_endpoint *); | ||
740 | |||
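These exports exist so an LLD can point its struct iscsi_transport at the common implementations and register it through cxgbi_iscsi_init(), declared below. A hedged, partial sketch of that wiring; my_iscsi_transport and the .name string are hypothetical:

static struct iscsi_transport my_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= "my-cxgbi-lld",
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.set_param	= cxgbi_set_conn_param,
	.get_conn_param	= cxgbi_get_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	.set_host_param	= cxgbi_set_host_param,
	.get_host_param	= cxgbi_get_host_param,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.cleanup_task	= cxgbi_cleanup_task,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* ... remaining hooks come straight from libiscsi ... */
};
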
741 | int cxgbi_iscsi_init(struct iscsi_transport *, | ||
742 | struct scsi_transport_template **); | ||
743 | void cxgbi_iscsi_cleanup(struct iscsi_transport *, | ||
744 | struct scsi_transport_template **); | ||
745 | void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *); | ||
746 | int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, | ||
747 | unsigned int, unsigned int); | ||
748 | int cxgbi_ddp_cleanup(struct cxgbi_device *); | ||
749 | void cxgbi_ddp_page_size_factor(int *); | ||
750 | void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); | ||
751 | void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, | ||
752 | struct cxgbi_gather_list *, unsigned int); | ||
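
A sketch of how a DDP reservation might program a run of pagepods with the two helpers above, under one stated assumption: each pagepod maps four pages, so the gather-list index advances by 4 per pod. On release, the same range would be scrubbed with cxgbi_ddp_ppod_clear().

static void cxgbi_ddp_program_sketch(struct cxgbi_pagepod *ppods,
				     struct cxgbi_pagepod_hdr *hdr,
				     struct cxgbi_gather_list *gl,
				     unsigned int npods)
{
	unsigned int i;

	/* write one pod per (assumed) 4-page chunk of the gather list */
	for (i = 0; i < npods; i++)
		cxgbi_ddp_ppod_set(ppods + i, hdr, gl, i * 4);
}
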
753 | #endif /*__LIBCXGBI_H__*/ | ||