diff options
author | kxie@chelsio.com <kxie@chelsio.com> | 2010-08-16 23:55:53 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2010-09-05 13:29:23 -0400 |
commit | 6f7efaabefebfbc523ea9776e3663a2d81b86399 (patch) | |
tree | b77eb425e2320e0a15d149723b190ac09062b5ee /drivers | |
parent | 7b36b6e03b0d6cee0948593a6a11841a457695b9 (diff) |
[SCSI] cxgb3i: change cxgb3i to use libcxgbi
Signed-off-by: Karen Xie <kxie@chelsio.com>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/scsi/Kconfig | 1 | ||||
-rw-r--r-- | drivers/scsi/Makefile | 2 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i.h | 161 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_ddp.c | 773 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_ddp.h | 312 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_init.c | 132 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 1018 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_offload.c | 1944 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_offload.h | 243 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_pdu.c | 495 | ||||
-rw-r--r-- | drivers/scsi/cxgb3i/cxgb3i_pdu.h | 59 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/Kconfig | 1 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/Makefile | 1 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/cxgb3i/Kbuild (renamed from drivers/scsi/cxgb3i/Kbuild) | 1 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/cxgb3i/Kconfig (renamed from drivers/scsi/cxgb3i/Kconfig) | 4 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1432 | ||||
-rw-r--r-- | drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 51 |
17 files changed, 1488 insertions, 5142 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index bee3aef230dc..a6fdcf4cf74a 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -378,7 +378,6 @@ config ISCSI_BOOT_SYSFS | |||
378 | via sysfs to userspace. If you wish to export this information, | 378 | via sysfs to userspace. If you wish to export this information, |
379 | say Y. Otherwise, say N. | 379 | say Y. Otherwise, say N. |
380 | 380 | ||
381 | source "drivers/scsi/cxgb3i/Kconfig" | ||
382 | source "drivers/scsi/cxgbi/Kconfig" | 381 | source "drivers/scsi/cxgbi/Kconfig" |
383 | source "drivers/scsi/bnx2i/Kconfig" | 382 | source "drivers/scsi/bnx2i/Kconfig" |
384 | source "drivers/scsi/be2iscsi/Kconfig" | 383 | source "drivers/scsi/be2iscsi/Kconfig" |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index cb31f8cf09d4..2e9a87e8e7d8 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -133,7 +133,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o | |||
133 | obj-$(CONFIG_SCSI_STEX) += stex.o | 133 | obj-$(CONFIG_SCSI_STEX) += stex.o |
134 | obj-$(CONFIG_SCSI_MVSAS) += mvsas/ | 134 | obj-$(CONFIG_SCSI_MVSAS) += mvsas/ |
135 | obj-$(CONFIG_PS3_ROM) += ps3rom.o | 135 | obj-$(CONFIG_PS3_ROM) += ps3rom.o |
136 | obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ | 136 | obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ |
137 | obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ | 137 | obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ |
138 | obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ | 138 | obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ |
139 | obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ | 139 | obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ |
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h deleted file mode 100644 index e3133b58e594..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i.h +++ /dev/null | |||
@@ -1,161 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i.h: Chelsio S3xx iSCSI driver. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #ifndef __CXGB3I_H__ | ||
14 | #define __CXGB3I_H__ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/netdevice.h> | ||
22 | #include <linux/scatterlist.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <scsi/libiscsi_tcp.h> | ||
25 | |||
26 | /* from cxgb3 LLD */ | ||
27 | #include "common.h" | ||
28 | #include "t3_cpl.h" | ||
29 | #include "t3cdev.h" | ||
30 | #include "cxgb3_ctl_defs.h" | ||
31 | #include "cxgb3_offload.h" | ||
32 | #include "firmware_exports.h" | ||
33 | |||
34 | #include "cxgb3i_offload.h" | ||
35 | #include "cxgb3i_ddp.h" | ||
36 | |||
37 | #define CXGB3I_SCSI_HOST_QDEPTH 1024 | ||
38 | #define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN | ||
39 | #define CXGB3I_MAX_LUN 512 | ||
40 | #define ISCSI_PDU_NONPAYLOAD_MAX \ | ||
41 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE) | ||
42 | |||
43 | struct cxgb3i_adapter; | ||
44 | struct cxgb3i_hba; | ||
45 | struct cxgb3i_endpoint; | ||
46 | |||
47 | /** | ||
48 | * struct cxgb3i_hba - cxgb3i iscsi structure (per port) | ||
49 | * | ||
50 | * @snic: cxgb3i adapter containing this port | ||
51 | * @ndev: pointer to netdev structure | ||
52 | * @shost: pointer to scsi host structure | ||
53 | */ | ||
54 | struct cxgb3i_hba { | ||
55 | struct cxgb3i_adapter *snic; | ||
56 | struct net_device *ndev; | ||
57 | struct Scsi_Host *shost; | ||
58 | }; | ||
59 | |||
60 | /** | ||
61 | * struct cxgb3i_adapter - cxgb3i adapter structure (per pci) | ||
62 | * | ||
63 | * @listhead: list head to link elements | ||
64 | * @lock: lock for this structure | ||
65 | * @tdev: pointer to t3cdev used by cxgb3 driver | ||
66 | * @pdev: pointer to pci dev | ||
67 | * @hba_cnt: # of hbas (the same as # of ports) | ||
68 | * @hba: all the hbas on this adapter | ||
69 | * @flags: bit flag for adapter event/status | ||
70 | * @tx_max_size: max. tx packet size supported | ||
71 | * @rx_max_size: max. rx packet size supported | ||
72 | * @tag_format: ddp tag format settings | ||
73 | */ | ||
74 | #define CXGB3I_ADAPTER_FLAG_RESET 0x1 | ||
75 | struct cxgb3i_adapter { | ||
76 | struct list_head list_head; | ||
77 | spinlock_t lock; | ||
78 | struct t3cdev *tdev; | ||
79 | struct pci_dev *pdev; | ||
80 | unsigned char hba_cnt; | ||
81 | struct cxgb3i_hba *hba[MAX_NPORTS]; | ||
82 | |||
83 | unsigned int flags; | ||
84 | unsigned int tx_max_size; | ||
85 | unsigned int rx_max_size; | ||
86 | |||
87 | struct cxgb3i_tag_format tag_format; | ||
88 | }; | ||
89 | |||
90 | /** | ||
91 | * struct cxgb3i_conn - cxgb3i iscsi connection | ||
92 | * | ||
93 | * @listhead: list head to link elements | ||
94 | * @cep: pointer to iscsi_endpoint structure | ||
95 | * @conn: pointer to iscsi_conn structure | ||
96 | * @hba: pointer to the hba this conn. is going through | ||
97 | * @task_idx_bits: # of bits needed for session->cmds_max | ||
98 | */ | ||
99 | struct cxgb3i_conn { | ||
100 | struct list_head list_head; | ||
101 | struct cxgb3i_endpoint *cep; | ||
102 | struct iscsi_conn *conn; | ||
103 | struct cxgb3i_hba *hba; | ||
104 | unsigned int task_idx_bits; | ||
105 | }; | ||
106 | |||
107 | /** | ||
108 | * struct cxgb3i_endpoint - iscsi tcp endpoint | ||
109 | * | ||
110 | * @c3cn: the h/w tcp connection representation | ||
111 | * @hba: pointer to the hba this conn. is going through | ||
112 | * @cconn: pointer to the associated cxgb3i iscsi connection | ||
113 | */ | ||
114 | struct cxgb3i_endpoint { | ||
115 | struct s3_conn *c3cn; | ||
116 | struct cxgb3i_hba *hba; | ||
117 | struct cxgb3i_conn *cconn; | ||
118 | }; | ||
119 | |||
120 | /** | ||
121 | * struct cxgb3i_task_data - private iscsi task data | ||
122 | * | ||
123 | * @nr_frags: # of coalesced page frags (from scsi sgl) | ||
124 | * @frags: coalesced page frags (from scsi sgl) | ||
125 | * @skb: tx pdu skb | ||
126 | * @offset: data offset for the next pdu | ||
127 | * @count: max. possible pdu payload | ||
128 | * @sgoffset: offset to the first sg entry for a given offset | ||
129 | */ | ||
130 | #define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) | ||
131 | struct cxgb3i_task_data { | ||
132 | unsigned short nr_frags; | ||
133 | skb_frag_t frags[MAX_PDU_FRAGS]; | ||
134 | struct sk_buff *skb; | ||
135 | unsigned int offset; | ||
136 | unsigned int count; | ||
137 | unsigned int sgoffset; | ||
138 | }; | ||
139 | |||
140 | int cxgb3i_iscsi_init(void); | ||
141 | void cxgb3i_iscsi_cleanup(void); | ||
142 | |||
143 | struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *); | ||
144 | void cxgb3i_adapter_open(struct t3cdev *); | ||
145 | void cxgb3i_adapter_close(struct t3cdev *); | ||
146 | |||
147 | struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, | ||
148 | struct net_device *); | ||
149 | void cxgb3i_hba_host_remove(struct cxgb3i_hba *); | ||
150 | |||
151 | int cxgb3i_pdu_init(void); | ||
152 | void cxgb3i_pdu_cleanup(void); | ||
153 | void cxgb3i_conn_cleanup_task(struct iscsi_task *); | ||
154 | int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8); | ||
155 | int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int); | ||
156 | int cxgb3i_conn_xmit_pdu(struct iscsi_task *); | ||
157 | |||
158 | void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt); | ||
159 | int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt); | ||
160 | |||
161 | #endif | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c deleted file mode 100644 index be0e23042c76..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c +++ /dev/null | |||
@@ -1,773 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/scatterlist.h> | ||
16 | |||
17 | /* from cxgb3 LLD */ | ||
18 | #include "common.h" | ||
19 | #include "t3_cpl.h" | ||
20 | #include "t3cdev.h" | ||
21 | #include "cxgb3_ctl_defs.h" | ||
22 | #include "cxgb3_offload.h" | ||
23 | #include "firmware_exports.h" | ||
24 | |||
25 | #include "cxgb3i_ddp.h" | ||
26 | |||
27 | #define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt) | ||
28 | #define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt) | ||
29 | #define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt) | ||
30 | |||
31 | #ifdef __DEBUG_CXGB3I_DDP__ | ||
32 | #define ddp_log_debug(fmt, args...) \ | ||
33 | printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args) | ||
34 | #else | ||
35 | #define ddp_log_debug(fmt...) | ||
36 | #endif | ||
37 | |||
38 | /* | ||
39 | * iSCSI Direct Data Placement | ||
40 | * | ||
41 | * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into | ||
42 | * pre-posted final destination host-memory buffers based on the Initiator | ||
43 | * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs. | ||
44 | * | ||
45 | * The host memory address is programmed into h/w in the format of pagepod | ||
46 | * entries. | ||
47 | * The location of the pagepod entry is encoded into ddp tag which is used or | ||
48 | * is the base for ITT/TTT. | ||
49 | */ | ||
50 | |||
51 | #define DDP_PGIDX_MAX 4 | ||
52 | #define DDP_THRESHOLD 2048 | ||
53 | static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; | ||
54 | static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; | ||
55 | static unsigned char page_idx = DDP_PGIDX_MAX; | ||
56 | |||
57 | /* | ||
58 | * functions to program the pagepod in h/w | ||
59 | */ | ||
60 | static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) | ||
61 | { | ||
62 | struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; | ||
63 | |||
64 | req->wr.wr_lo = 0; | ||
65 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); | ||
66 | req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | | ||
67 | V_ULPTX_CMD(ULP_MEM_WRITE)); | ||
68 | req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | | ||
69 | V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); | ||
70 | } | ||
71 | |||
72 | static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr, | ||
73 | unsigned int idx, unsigned int npods, | ||
74 | struct cxgb3i_gather_list *gl) | ||
75 | { | ||
76 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | ||
77 | int i; | ||
78 | |||
79 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | ||
80 | struct sk_buff *skb = ddp->gl_skb[idx]; | ||
81 | struct pagepod *ppod; | ||
82 | int j, pidx; | ||
83 | |||
84 | /* hold on to the skb until we clear the ddp mapping */ | ||
85 | skb_get(skb); | ||
86 | |||
87 | ulp_mem_io_set_hdr(skb, pm_addr); | ||
88 | ppod = (struct pagepod *) | ||
89 | (skb->head + sizeof(struct ulp_mem_io)); | ||
90 | memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod)); | ||
91 | for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx) | ||
92 | ppod->addr[j] = pidx < gl->nelem ? | ||
93 | cpu_to_be64(gl->phys_addr[pidx]) : 0UL; | ||
94 | |||
95 | skb->priority = CPL_PRIORITY_CONTROL; | ||
96 | cxgb3_ofld_send(ddp->tdev, skb); | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag, | ||
102 | unsigned int idx, unsigned int npods) | ||
103 | { | ||
104 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | ||
105 | int i; | ||
106 | |||
107 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | ||
108 | struct sk_buff *skb = ddp->gl_skb[idx]; | ||
109 | |||
110 | if (!skb) { | ||
111 | ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n", | ||
112 | tag, idx, i, npods); | ||
113 | continue; | ||
114 | } | ||
115 | ddp->gl_skb[idx] = NULL; | ||
116 | memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE); | ||
117 | ulp_mem_io_set_hdr(skb, pm_addr); | ||
118 | skb->priority = CPL_PRIORITY_CONTROL; | ||
119 | cxgb3_ofld_send(ddp->tdev, skb); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp, | ||
124 | unsigned int start, unsigned int max, | ||
125 | unsigned int count, | ||
126 | struct cxgb3i_gather_list *gl) | ||
127 | { | ||
128 | unsigned int i, j, k; | ||
129 | |||
130 | /* not enough entries */ | ||
131 | if ((max - start) < count) | ||
132 | return -EBUSY; | ||
133 | |||
134 | max -= count; | ||
135 | spin_lock(&ddp->map_lock); | ||
136 | for (i = start; i < max;) { | ||
137 | for (j = 0, k = i; j < count; j++, k++) { | ||
138 | if (ddp->gl_map[k]) | ||
139 | break; | ||
140 | } | ||
141 | if (j == count) { | ||
142 | for (j = 0, k = i; j < count; j++, k++) | ||
143 | ddp->gl_map[k] = gl; | ||
144 | spin_unlock(&ddp->map_lock); | ||
145 | return i; | ||
146 | } | ||
147 | i += j + 1; | ||
148 | } | ||
149 | spin_unlock(&ddp->map_lock); | ||
150 | return -EBUSY; | ||
151 | } | ||
152 | |||
153 | static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp, | ||
154 | int start, int count) | ||
155 | { | ||
156 | spin_lock(&ddp->map_lock); | ||
157 | memset(&ddp->gl_map[start], 0, | ||
158 | count * sizeof(struct cxgb3i_gather_list *)); | ||
159 | spin_unlock(&ddp->map_lock); | ||
160 | } | ||
161 | |||
162 | static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp, | ||
163 | int idx, int count) | ||
164 | { | ||
165 | int i; | ||
166 | |||
167 | for (i = 0; i < count; i++, idx++) | ||
168 | if (ddp->gl_skb[idx]) { | ||
169 | kfree_skb(ddp->gl_skb[idx]); | ||
170 | ddp->gl_skb[idx] = NULL; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx, | ||
175 | int count, gfp_t gfp) | ||
176 | { | ||
177 | int i; | ||
178 | |||
179 | for (i = 0; i < count; i++) { | ||
180 | struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) + | ||
181 | PPOD_SIZE, gfp); | ||
182 | if (skb) { | ||
183 | ddp->gl_skb[idx + i] = skb; | ||
184 | skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE); | ||
185 | } else { | ||
186 | ddp_free_gl_skb(ddp, idx, i); | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | } | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * cxgb3i_ddp_find_page_index - return ddp page index for a given page size | ||
195 | * @pgsz: page size | ||
196 | * return the ddp page index, if no match is found return DDP_PGIDX_MAX. | ||
197 | */ | ||
198 | int cxgb3i_ddp_find_page_index(unsigned long pgsz) | ||
199 | { | ||
200 | int i; | ||
201 | |||
202 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
203 | if (pgsz == (1UL << ddp_page_shift[i])) | ||
204 | return i; | ||
205 | } | ||
206 | ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz); | ||
207 | return DDP_PGIDX_MAX; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE | ||
212 | * return the ddp page index, if no match is found return DDP_PGIDX_MAX. | ||
213 | */ | ||
214 | int cxgb3i_ddp_adjust_page_table(void) | ||
215 | { | ||
216 | int i; | ||
217 | unsigned int base_order, order; | ||
218 | |||
219 | if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { | ||
220 | ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n", | ||
221 | PAGE_SIZE, 1UL << ddp_page_shift[0]); | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | base_order = get_order(1UL << ddp_page_shift[0]); | ||
226 | order = get_order(1 << PAGE_SHIFT); | ||
227 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | ||
228 | /* first is the kernel page size, then just doubling the size */ | ||
229 | ddp_page_order[i] = order - base_order + i; | ||
230 | ddp_page_shift[i] = PAGE_SHIFT + i; | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static inline void ddp_gl_unmap(struct pci_dev *pdev, | ||
236 | struct cxgb3i_gather_list *gl) | ||
237 | { | ||
238 | int i; | ||
239 | |||
240 | for (i = 0; i < gl->nelem; i++) | ||
241 | pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE, | ||
242 | PCI_DMA_FROMDEVICE); | ||
243 | } | ||
244 | |||
245 | static inline int ddp_gl_map(struct pci_dev *pdev, | ||
246 | struct cxgb3i_gather_list *gl) | ||
247 | { | ||
248 | int i; | ||
249 | |||
250 | for (i = 0; i < gl->nelem; i++) { | ||
251 | gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0, | ||
252 | PAGE_SIZE, | ||
253 | PCI_DMA_FROMDEVICE); | ||
254 | if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i]))) | ||
255 | goto unmap; | ||
256 | } | ||
257 | |||
258 | return i; | ||
259 | |||
260 | unmap: | ||
261 | if (i) { | ||
262 | unsigned int nelem = gl->nelem; | ||
263 | |||
264 | gl->nelem = i; | ||
265 | ddp_gl_unmap(pdev, gl); | ||
266 | gl->nelem = nelem; | ||
267 | } | ||
268 | return -ENOMEM; | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * cxgb3i_ddp_make_gl - build ddp page buffer list | ||
273 | * @xferlen: total buffer length | ||
274 | * @sgl: page buffer scatter-gather list | ||
275 | * @sgcnt: # of page buffers | ||
276 | * @pdev: pci_dev, used for pci map | ||
277 | * @gfp: allocation mode | ||
278 | * | ||
279 | * construct a ddp page buffer list from the scsi scattergather list. | ||
280 | * coalesce buffers as much as possible, and obtain dma addresses for | ||
281 | * each page. | ||
282 | * | ||
283 | * Return the cxgb3i_gather_list constructed from the page buffers if the | ||
284 | * memory can be used for ddp. Return NULL otherwise. | ||
285 | */ | ||
286 | struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen, | ||
287 | struct scatterlist *sgl, | ||
288 | unsigned int sgcnt, | ||
289 | struct pci_dev *pdev, | ||
290 | gfp_t gfp) | ||
291 | { | ||
292 | struct cxgb3i_gather_list *gl; | ||
293 | struct scatterlist *sg = sgl; | ||
294 | struct page *sgpage = sg_page(sg); | ||
295 | unsigned int sglen = sg->length; | ||
296 | unsigned int sgoffset = sg->offset; | ||
297 | unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> | ||
298 | PAGE_SHIFT; | ||
299 | int i = 1, j = 0; | ||
300 | |||
301 | if (xferlen < DDP_THRESHOLD) { | ||
302 | ddp_log_debug("xfer %u < threshold %u, no ddp.\n", | ||
303 | xferlen, DDP_THRESHOLD); | ||
304 | return NULL; | ||
305 | } | ||
306 | |||
307 | gl = kzalloc(sizeof(struct cxgb3i_gather_list) + | ||
308 | npages * (sizeof(dma_addr_t) + sizeof(struct page *)), | ||
309 | gfp); | ||
310 | if (!gl) | ||
311 | return NULL; | ||
312 | |||
313 | gl->pages = (struct page **)&gl->phys_addr[npages]; | ||
314 | gl->length = xferlen; | ||
315 | gl->offset = sgoffset; | ||
316 | gl->pages[0] = sgpage; | ||
317 | |||
318 | sg = sg_next(sg); | ||
319 | while (sg) { | ||
320 | struct page *page = sg_page(sg); | ||
321 | |||
322 | if (sgpage == page && sg->offset == sgoffset + sglen) | ||
323 | sglen += sg->length; | ||
324 | else { | ||
325 | /* make sure the sgl is fit for ddp: | ||
326 | * each has the same page size, and | ||
327 | * all of the middle pages are used completely | ||
328 | */ | ||
329 | if ((j && sgoffset) || | ||
330 | ((i != sgcnt - 1) && | ||
331 | ((sglen + sgoffset) & ~PAGE_MASK))) | ||
332 | goto error_out; | ||
333 | |||
334 | j++; | ||
335 | if (j == gl->nelem || sg->offset) | ||
336 | goto error_out; | ||
337 | gl->pages[j] = page; | ||
338 | sglen = sg->length; | ||
339 | sgoffset = sg->offset; | ||
340 | sgpage = page; | ||
341 | } | ||
342 | i++; | ||
343 | sg = sg_next(sg); | ||
344 | } | ||
345 | gl->nelem = ++j; | ||
346 | |||
347 | if (ddp_gl_map(pdev, gl) < 0) | ||
348 | goto error_out; | ||
349 | |||
350 | return gl; | ||
351 | |||
352 | error_out: | ||
353 | kfree(gl); | ||
354 | return NULL; | ||
355 | } | ||
356 | |||
357 | /** | ||
358 | * cxgb3i_ddp_release_gl - release a page buffer list | ||
359 | * @gl: a ddp page buffer list | ||
360 | * @pdev: pci_dev used for pci_unmap | ||
361 | * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl(). | ||
362 | */ | ||
363 | void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl, | ||
364 | struct pci_dev *pdev) | ||
365 | { | ||
366 | ddp_gl_unmap(pdev, gl); | ||
367 | kfree(gl); | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer | ||
372 | * @tdev: t3cdev adapter | ||
373 | * @tid: connection id | ||
374 | * @tformat: tag format | ||
375 | * @tagp: contains s/w tag initially, will be updated with ddp/hw tag | ||
376 | * @gl: the page momory list | ||
377 | * @gfp: allocation mode | ||
378 | * | ||
379 | * ddp setup for a given page buffer list and construct the ddp tag. | ||
380 | * return 0 if success, < 0 otherwise. | ||
381 | */ | ||
382 | int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid, | ||
383 | struct cxgb3i_tag_format *tformat, u32 *tagp, | ||
384 | struct cxgb3i_gather_list *gl, gfp_t gfp) | ||
385 | { | ||
386 | struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; | ||
387 | struct pagepod_hdr hdr; | ||
388 | unsigned int npods; | ||
389 | int idx = -1; | ||
390 | int err = -ENOMEM; | ||
391 | u32 sw_tag = *tagp; | ||
392 | u32 tag; | ||
393 | |||
394 | if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem || | ||
395 | gl->length < DDP_THRESHOLD) { | ||
396 | ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n", | ||
397 | page_idx, gl->length, DDP_THRESHOLD); | ||
398 | return -EINVAL; | ||
399 | } | ||
400 | |||
401 | npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; | ||
402 | |||
403 | if (ddp->idx_last == ddp->nppods) | ||
404 | idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl); | ||
405 | else { | ||
406 | idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, | ||
407 | ddp->nppods, npods, gl); | ||
408 | if (idx < 0 && ddp->idx_last >= npods) { | ||
409 | idx = ddp_find_unused_entries(ddp, 0, | ||
410 | min(ddp->idx_last + npods, ddp->nppods), | ||
411 | npods, gl); | ||
412 | } | ||
413 | } | ||
414 | if (idx < 0) { | ||
415 | ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n", | ||
416 | gl->length, gl->nelem, npods); | ||
417 | return idx; | ||
418 | } | ||
419 | |||
420 | err = ddp_alloc_gl_skb(ddp, idx, npods, gfp); | ||
421 | if (err < 0) | ||
422 | goto unmark_entries; | ||
423 | |||
424 | tag = cxgb3i_ddp_tag_base(tformat, sw_tag); | ||
425 | tag |= idx << PPOD_IDX_SHIFT; | ||
426 | |||
427 | hdr.rsvd = 0; | ||
428 | hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid)); | ||
429 | hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); | ||
430 | hdr.maxoffset = htonl(gl->length); | ||
431 | hdr.pgoffset = htonl(gl->offset); | ||
432 | |||
433 | err = set_ddp_map(ddp, &hdr, idx, npods, gl); | ||
434 | if (err < 0) | ||
435 | goto free_gl_skb; | ||
436 | |||
437 | ddp->idx_last = idx; | ||
438 | ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n", | ||
439 | gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, | ||
440 | idx, npods); | ||
441 | *tagp = tag; | ||
442 | return 0; | ||
443 | |||
444 | free_gl_skb: | ||
445 | ddp_free_gl_skb(ddp, idx, npods); | ||
446 | unmark_entries: | ||
447 | ddp_unmark_entries(ddp, idx, npods); | ||
448 | return err; | ||
449 | } | ||
450 | |||
451 | /** | ||
452 | * cxgb3i_ddp_tag_release - release a ddp tag | ||
453 | * @tdev: t3cdev adapter | ||
454 | * @tag: ddp tag | ||
455 | * ddp cleanup for a given ddp tag and release all the resources held | ||
456 | */ | ||
457 | void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag) | ||
458 | { | ||
459 | struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; | ||
460 | u32 idx; | ||
461 | |||
462 | if (!ddp) { | ||
463 | ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag); | ||
464 | return; | ||
465 | } | ||
466 | |||
467 | idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; | ||
468 | if (idx < ddp->nppods) { | ||
469 | struct cxgb3i_gather_list *gl = ddp->gl_map[idx]; | ||
470 | unsigned int npods; | ||
471 | |||
472 | if (!gl || !gl->nelem) { | ||
473 | ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n", | ||
474 | tag, idx, gl, gl ? gl->nelem : 0); | ||
475 | return; | ||
476 | } | ||
477 | npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; | ||
478 | ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n", | ||
479 | tag, idx, npods); | ||
480 | clear_ddp_map(ddp, tag, idx, npods); | ||
481 | ddp_unmark_entries(ddp, idx, npods); | ||
482 | cxgb3i_ddp_release_gl(gl, ddp->pdev); | ||
483 | } else | ||
484 | ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n", | ||
485 | tag, idx, ddp->nppods); | ||
486 | } | ||
487 | |||
488 | static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx, | ||
489 | int reply) | ||
490 | { | ||
491 | struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field), | ||
492 | GFP_KERNEL); | ||
493 | struct cpl_set_tcb_field *req; | ||
494 | u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; | ||
495 | |||
496 | if (!skb) | ||
497 | return -ENOMEM; | ||
498 | |||
499 | /* set up ulp submode and page size */ | ||
500 | req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); | ||
501 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | ||
502 | req->wr.wr_lo = 0; | ||
503 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | ||
504 | req->reply = V_NO_REPLY(reply ? 0 : 1); | ||
505 | req->cpu_idx = 0; | ||
506 | req->word = htons(31); | ||
507 | req->mask = cpu_to_be64(0xF0000000); | ||
508 | req->val = cpu_to_be64(val << 28); | ||
509 | skb->priority = CPL_PRIORITY_CONTROL; | ||
510 | |||
511 | cxgb3_ofld_send(tdev, skb); | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | /** | ||
516 | * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size | ||
517 | * @tdev: t3cdev adapter | ||
518 | * @tid: connection id | ||
519 | * @reply: request reply from h/w | ||
520 | * set up the ddp page size based on the host PAGE_SIZE for a connection | ||
521 | * identified by tid | ||
522 | */ | ||
523 | int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid, | ||
524 | int reply) | ||
525 | { | ||
526 | return setup_conn_pgidx(tdev, tid, page_idx, reply); | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size | ||
531 | * @tdev: t3cdev adapter | ||
532 | * @tid: connection id | ||
533 | * @reply: request reply from h/w | ||
534 | * @pgsz: ddp page size | ||
535 | * set up the ddp page size for a connection identified by tid | ||
536 | */ | ||
537 | int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid, | ||
538 | int reply, unsigned long pgsz) | ||
539 | { | ||
540 | int pgidx = cxgb3i_ddp_find_page_index(pgsz); | ||
541 | |||
542 | return setup_conn_pgidx(tdev, tid, pgidx, reply); | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * cxgb3i_setup_conn_digest - setup conn. digest setting | ||
547 | * @tdev: t3cdev adapter | ||
548 | * @tid: connection id | ||
549 | * @hcrc: header digest enabled | ||
550 | * @dcrc: data digest enabled | ||
551 | * @reply: request reply from h/w | ||
552 | * set up the iscsi digest settings for a connection identified by tid | ||
553 | */ | ||
554 | int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid, | ||
555 | int hcrc, int dcrc, int reply) | ||
556 | { | ||
557 | struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field), | ||
558 | GFP_KERNEL); | ||
559 | struct cpl_set_tcb_field *req; | ||
560 | u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0); | ||
561 | |||
562 | if (!skb) | ||
563 | return -ENOMEM; | ||
564 | |||
565 | /* set up ulp submode and page size */ | ||
566 | req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); | ||
567 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | ||
568 | req->wr.wr_lo = 0; | ||
569 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | ||
570 | req->reply = V_NO_REPLY(reply ? 0 : 1); | ||
571 | req->cpu_idx = 0; | ||
572 | req->word = htons(31); | ||
573 | req->mask = cpu_to_be64(0x0F000000); | ||
574 | req->val = cpu_to_be64(val << 24); | ||
575 | skb->priority = CPL_PRIORITY_CONTROL; | ||
576 | |||
577 | cxgb3_ofld_send(tdev, skb); | ||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | |||
582 | /** | ||
583 | * cxgb3i_adapter_ddp_info - read the adapter's ddp information | ||
584 | * @tdev: t3cdev adapter | ||
585 | * @tformat: tag format | ||
586 | * @txsz: max tx pdu payload size, filled in by this func. | ||
587 | * @rxsz: max rx pdu payload size, filled in by this func. | ||
588 | * setup the tag format for a given iscsi entity | ||
589 | */ | ||
590 | int cxgb3i_adapter_ddp_info(struct t3cdev *tdev, | ||
591 | struct cxgb3i_tag_format *tformat, | ||
592 | unsigned int *txsz, unsigned int *rxsz) | ||
593 | { | ||
594 | struct cxgb3i_ddp_info *ddp; | ||
595 | unsigned char idx_bits; | ||
596 | |||
597 | if (!tformat) | ||
598 | return -EINVAL; | ||
599 | |||
600 | if (!tdev->ulp_iscsi) | ||
601 | return -EINVAL; | ||
602 | |||
603 | ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; | ||
604 | |||
605 | idx_bits = 32 - tformat->sw_bits; | ||
606 | tformat->rsvd_bits = ddp->idx_bits; | ||
607 | tformat->rsvd_shift = PPOD_IDX_SHIFT; | ||
608 | tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1; | ||
609 | |||
610 | ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n", | ||
611 | tformat->sw_bits, tformat->rsvd_bits, | ||
612 | tformat->rsvd_shift, tformat->rsvd_mask); | ||
613 | |||
614 | *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | ||
615 | ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); | ||
616 | *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | ||
617 | ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); | ||
618 | ddp_log_info("max payload size: %u/%u, %u/%u.\n", | ||
619 | *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz); | ||
620 | return 0; | ||
621 | } | ||
622 | |||
/**
 * ddp_cleanup - kref release callback for the adapter's ddp resource
 * @kref: refcnt member of the cxgb3i_ddp_info being torn down
 *
 * Invoked via kref_put() when the last reference drops: detaches the
 * ddp info from the t3cdev, frees every gather list still present in
 * the pagepod map (plus the pre-allocated pagepod skbs covering its
 * slots), and finally frees the ddp info allocation itself.
 */

static void ddp_cleanup(struct kref *kref)
{
	struct cxgb3i_ddp_info *ddp = container_of(kref,
						struct cxgb3i_ddp_info,
						refcnt);
	int i = 0;

	ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);

	ddp->tdev->ulp_iscsi = NULL;
	/* walk the pagepod map: a gather list occupies 'npods' consecutive
	 * slots (PPOD_PAGES_MAX pages per pod), so advance past all of them */
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];
		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
					ddp->tdev, i, npods);
			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgb3i_free_big_mem(ddp);
}
655 | |||
656 | void cxgb3i_ddp_cleanup(struct t3cdev *tdev) | ||
657 | { | ||
658 | struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; | ||
659 | |||
660 | ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); | ||
661 | if (ddp) | ||
662 | kref_put(&ddp->refcnt, ddp_cleanup); | ||
663 | } | ||
664 | |||
/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	/* already set up (shared adapter): just take another reference */
	if (ddp) {
		kref_get(&ddp->refcnt);
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			tdev, tdev->ulp_iscsi);
		return;
	}

	/* query the LLD for the pagepod memory window and packet limits */
	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
				 tdev->name, err);
		return;
	}

	/* size the pagepod index: one pod per PPOD_SIZE bytes of window,
	 * index width capped at PPOD_IDX_MAX_SIZE bits */
	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	/* single allocation: ddp info struct followed by the gl_map[] and
	 * gl_skb[] arrays (carved out just below) */
	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
					(sizeof(struct cxgb3i_gather_list *) +
					sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	/* push the tag mask and per-index page sizes back to the h/w,
	 * shrinking the window to exactly what the index can address */
	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			      "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	/* publish only after the h/w accepted the parameters */
	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
			" %u/%u.\n",
			tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
			ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
			ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}
750 | |||
/**
 * cxgb3i_ddp_init - initialize ddp functions
 * @tdev: t3cdev adapter
 *
 * On first use, resolves the global ddp page-size index matching the
 * system PAGE_SIZE; if no h/w table entry matches, tries to reprogram
 * the h/w page table before giving up (ddp disabled). Then sets up the
 * per-adapter ddp resource via ddp_init().
 */
void cxgb3i_ddp_init(struct t3cdev *tdev)
{
	/* page_idx is module-global; resolved once for all adapters */
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
					PAGE_SIZE);
			if (cxgb3i_ddp_adjust_page_table() < 0) {
				ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
						PAGE_SIZE);
				return;
			}
			page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
		}
		ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
				PAGE_SIZE, page_idx);
	}
	ddp_init(tdev);
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h deleted file mode 100644 index 6761b329124d..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h +++ /dev/null | |||
@@ -1,312 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #ifndef __CXGB3I_ULP2_DDP_H__ | ||
14 | #define __CXGB3I_ULP2_DDP_H__ | ||
15 | |||
16 | #include <linux/slab.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | |||
/**
 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
 *
 * @sw_bits: # of bits used by iscsi software layer
 * @rsvd_bits: # of bits used by h/w
 * @rsvd_shift: h/w bits shift left
 * @filler: pad to 32-bit alignment
 * @rsvd_mask: reserved bit mask
 */
struct cxgb3i_tag_format {
	unsigned char sw_bits;
	unsigned char rsvd_bits;
	unsigned char rsvd_shift;
	unsigned char filler[1];
	u32 rsvd_mask;
};

/**
 * struct cxgb3i_gather_list - cxgb3i direct data placement memory
 *
 * @tag: ddp tag
 * @length: total data buffer length
 * @offset: initial offset to the 1st page
 * @nelem: # of pages
 * @pages: page pointers
 * @phys_addr: physical addresses (flexible array, one per page)
 */
struct cxgb3i_gather_list {
	u32 tag;
	unsigned int length;
	unsigned int offset;
	unsigned int nelem;
	struct page **pages;
	dma_addr_t phys_addr[0];
};

/**
 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
 *
 * @list: list head to link elements
 * @refcnt: ref. count
 * @tdev: pointer to t3cdev used by cxgb3 driver
 * @pdev: pci device of the adapter (for dma mapping)
 * @max_txsz: max tx packet size for ddp
 * @max_rxsz: max rx packet size for ddp
 * @llimit: lower bound of the page pod memory
 * @ulimit: upper bound of the page pod memory
 * @nppods: # of page pod entries
 * @idx_last: page pod entry last used
 * @idx_bits: # of bits the pagepod index would take
 * @filler: pad to 32-bit alignment
 * @idx_mask: pagepod index mask
 * @rsvd_tag_mask: tag mask
 * @map_lock: lock to synchonize access to the page pod map
 * @gl_map: ddp memory gather list
 * @gl_skb: skb used to program the pagepod
 */
struct cxgb3i_ddp_info {
	struct list_head list;
	struct kref refcnt;
	struct t3cdev *tdev;
	struct pci_dev *pdev;
	unsigned int max_txsz;
	unsigned int max_rxsz;
	unsigned int llimit;
	unsigned int ulimit;
	unsigned int nppods;
	unsigned int idx_last;
	unsigned char idx_bits;
	unsigned char filler[3];
	u32 idx_mask;
	u32 rsvd_tag_mask;
	spinlock_t map_lock;
	struct cxgb3i_gather_list **gl_map;
	struct sk_buff **gl_skb;
};
92 | |||
#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */
#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
#define PPOD_PAGES_MAX	4
#define PPOD_PAGES_SHIFT	2	/* 4 pages per pod */

/*
 * struct pagepod_hdr, pagepod - pagepod format
 * a pagepod holds the header plus up to PPOD_PAGES_MAX(+1) page addresses
 */
struct pagepod_hdr {
	u32 vld_tid;
	u32 pgsz_tag_clr;
	u32 maxoffset;
	u32 pgoffset;
	u64 rsvd;
};

struct pagepod {
	struct pagepod_hdr hdr;
	u64 addr[PPOD_PAGES_MAX + 1];
};

#define PPOD_SIZE	sizeof(struct pagepod)	/* 64 */
#define PPOD_SIZE_SHIFT	6

/* the low 6 tag bits carry the "color"; the pagepod index sits above them */
#define PPOD_COLOR_SHIFT	0
#define PPOD_COLOR_SIZE	6
#define PPOD_COLOR_MASK	((1 << PPOD_COLOR_SIZE) - 1)

#define PPOD_IDX_SHIFT	PPOD_COLOR_SIZE
#define PPOD_IDX_MAX_SIZE	24

/* S_/M_/V_/F_ macros: bit shift, mask, value and flag accessors for the
 * pagepod header fields (cxgb3 firmware interface convention) */
#define S_PPOD_TID	0
#define M_PPOD_TID	0xFFFFFF
#define V_PPOD_TID(x)	((x) << S_PPOD_TID)

#define S_PPOD_VALID	24
#define V_PPOD_VALID(x)	((x) << S_PPOD_VALID)
#define F_PPOD_VALID	V_PPOD_VALID(1U)

#define S_PPOD_COLOR	0
#define M_PPOD_COLOR	0x3F
#define V_PPOD_COLOR(x)	((x) << S_PPOD_COLOR)

#define S_PPOD_TAG	6
#define M_PPOD_TAG	0xFFFFFF
#define V_PPOD_TAG(x)	((x) << S_PPOD_TAG)

#define S_PPOD_PGSZ	30
#define M_PPOD_PGSZ	0x3
#define V_PPOD_PGSZ(x)	((x) << S_PPOD_PGSZ)
144 | |||
145 | /* | ||
146 | * large memory chunk allocation/release | ||
147 | * use vmalloc() if kmalloc() fails | ||
148 | */ | ||
149 | static inline void *cxgb3i_alloc_big_mem(unsigned int size, | ||
150 | gfp_t gfp) | ||
151 | { | ||
152 | void *p = kmalloc(size, gfp); | ||
153 | if (!p) | ||
154 | p = vmalloc(size); | ||
155 | if (p) | ||
156 | memset(p, 0, size); | ||
157 | return p; | ||
158 | } | ||
159 | |||
/* free memory obtained from cxgb3i_alloc_big_mem(), picking the matching
 * release routine by how the address was allocated */
static inline void cxgb3i_free_big_mem(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}
167 | |||
168 | /* | ||
169 | * cxgb3i ddp tag are 32 bits, it consists of reserved bits used by h/w and | ||
170 | * non-reserved bits that can be used by the iscsi s/w. | ||
171 | * The reserved bits are identified by the rsvd_bits and rsvd_shift fields | ||
172 | * in struct cxgb3i_tag_format. | ||
173 | * | ||
174 | * The upper most reserved bit can be used to check if a tag is ddp tag or not: | ||
175 | * if the bit is 0, the tag is a valid ddp tag | ||
176 | */ | ||
177 | |||
178 | /** | ||
179 | * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag | ||
180 | * @tformat: tag format information | ||
181 | * @tag: tag to be checked | ||
182 | * | ||
183 | * return true if the tag is a ddp tag, false otherwise. | ||
184 | */ | ||
185 | static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag) | ||
186 | { | ||
187 | return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * cxgb3i_sw_tag_usable - check if s/w tag has enough bits left for hw bits | ||
192 | * @tformat: tag format information | ||
193 | * @sw_tag: s/w tag to be checked | ||
194 | * | ||
195 | * return true if the tag can be used for hw ddp tag, false otherwise. | ||
196 | */ | ||
197 | static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat, | ||
198 | u32 sw_tag) | ||
199 | { | ||
200 | sw_tag >>= (32 - tformat->rsvd_bits); | ||
201 | return !sw_tag; | ||
202 | } | ||
203 | |||
/**
 * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag.
 */
static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
					 u32 sw_tag)
{
	/* bit position of the upper-most reserved bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		/* sw_tag has bits above the marker: splice them around it.
		 * NOTE(review): v2 shifts down by (shift - 1) but back up by
		 * shift, so v2's low bit can overlap v1's top bit — this
		 * looks safe only for tags that pass cxgb3i_sw_tag_usable();
		 * confirm before reusing this helper elsewhere. */
		u32 v1 = sw_tag & ((1 << shift) - 1);
		u32 v2 = (sw_tag >> (shift - 1)) << shift;

		return v2 | v1 | 1 << shift;
	}
	/* tag fits entirely below the marker: just set the marker bit */
	return sw_tag | 1 << shift;
}
225 | |||
/**
 * cxgb3i_ddp_tag_base - shift s/w tag bits so that reserved bits are not used
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * Splits @sw_tag at rsvd_shift: the low part stays in place, the high
 * part is moved above the h/w reserved field, leaving the reserved bits
 * clear for the hardware to fill in.
 */
static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
				      u32 sw_tag)
{
	u32 mask = (1 << tformat->rsvd_shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		/* bits at/above rsvd_shift get relocated past the h/w field */
		u32 v1 = sw_tag & mask;
		u32 v2 = sw_tag >> tformat->rsvd_shift;

		v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
		return v2 | v1;
	}
	/* tag fits entirely below rsvd_shift: nothing to relocate */
	return sw_tag;
}
245 | |||
246 | /** | ||
247 | * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w | ||
248 | * @tformat: tag format information | ||
249 | * @tag: tag to be checked | ||
250 | * | ||
251 | * return the reserved bits in the tag | ||
252 | */ | ||
253 | static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat, | ||
254 | u32 tag) | ||
255 | { | ||
256 | if (cxgb3i_is_ddp_tag(tformat, tag)) | ||
257 | return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; | ||
258 | return 0; | ||
259 | } | ||
260 | |||
/**
 * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return the non-reserved bits in the tag; inverse of the bit splicing
 * done by cxgb3i_ddp_tag_base()/cxgb3i_set_non_ddp_tag().
 */
static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
					  u32 tag)
{
	/* bit position of the upper-most reserved bit */
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgb3i_is_ddp_tag(tformat, tag)) {
		/* ddp tag: s/w bits sit below rsvd_shift and above the
		 * whole reserved field */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		/* non-ddp tag: strip the marker bit and close the gap */
		u32 mask = (1 << shift) - 1;

		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
286 | |||
/* ddp tag reservation / release */
int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
			   struct cxgb3i_tag_format *, u32 *tag,
			   struct cxgb3i_gather_list *, gfp_t gfp);
void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);

/* gather-list construction / teardown for a scatterlist */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
				struct scatterlist *sgl,
				unsigned int sgcnt,
				struct pci_dev *pdev,
				gfp_t gfp);
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev);

/* per-connection TCB setup (page size, digest settings) */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
				    int reply);
int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
			       unsigned long pgsz);
int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
				int hcrc, int dcrc, int reply);
int cxgb3i_ddp_find_page_index(unsigned long pgsz);
int cxgb3i_adapter_ddp_info(struct t3cdev *, struct cxgb3i_tag_format *,
			    unsigned int *txsz, unsigned int *rxsz);

/* per-adapter ddp manager lifetime */
void cxgb3i_ddp_init(struct t3cdev *);
void cxgb3i_ddp_cleanup(struct t3cdev *);
#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c deleted file mode 100644 index 685af3698518..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_init.c +++ /dev/null | |||
@@ -1,132 +0,0 @@ | |||
1 | /* cxgb3i_init.c: Chelsio S3xx iSCSI driver. | ||
2 | * | ||
3 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Written by: Karen Xie (kxie@chelsio.com) | ||
10 | */ | ||
11 | |||
12 | #include "cxgb3i.h" | ||
13 | |||
#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_VERSION	"1.0.2"
#define DRV_MODULE_RELDATE	"Mar. 2009"

/* one-time banner, printed from open_s3_dev() on the first adapter */
static char version[] =
	"Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static void open_s3_dev(struct t3cdev *);
static void close_s3_dev(struct t3cdev *);
static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);

/* CPL message handler table, populated by cxgb3i_sdev_init() */
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
/* registration hooks handed to the cxgb3 LLD */
static struct cxgb3_client t3c_client = {
	.name = "iscsi_cxgb3",
	.handlers = cxgb3i_cpl_handlers,
	.add = open_s3_dev,
	.remove = close_s3_dev,
	.event_handler = s3_event_handler,
};
39 | |||
/**
 * open_s3_dev - register with cxgb3 LLD
 * @t3dev: cxgb3 adapter instance
 *
 * Called by the cxgb3 LLD for each adapter: prints the driver banner
 * once, then brings up ddp, the offload sub-device and the iscsi-facing
 * adapter structures for this t3cdev.
 */
static void open_s3_dev(struct t3cdev *t3dev)
{
	static int vers_printed;

	if (!vers_printed) {
		printk(KERN_INFO "%s", version);
		vers_printed = 1;
	}

	cxgb3i_ddp_init(t3dev);
	cxgb3i_sdev_add(t3dev, &t3c_client);
	cxgb3i_adapter_open(t3dev);
}
57 | |||
/**
 * close_s3_dev - de-register with cxgb3 LLD
 * @t3dev: cxgb3 adapter instance
 *
 * Tears down in strict reverse order of open_s3_dev(): adapter first,
 * then the offload sub-device, then the ddp resource.
 */
static void close_s3_dev(struct t3cdev *t3dev)
{
	cxgb3i_adapter_close(t3dev);
	cxgb3i_sdev_remove(t3dev);
	cxgb3i_ddp_cleanup(t3dev);
}
68 | |||
/* s3_event_handler - track offload status events from the cxgb3 LLD by
 * toggling the adapter's RESET flag (consulted by adapter open/close) */
static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);

	/* snic may be NULL here; only the pointer value is logged */
	cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
			snic, tdev, event, port);
	if (!snic)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
		break;
	}
}
87 | |||
88 | /** | ||
89 | * cxgb3i_init_module - module init entry point | ||
90 | * | ||
91 | * initialize any driver wide global data structures and register itself | ||
92 | * with the cxgb3 module | ||
93 | */ | ||
94 | static int __init cxgb3i_init_module(void) | ||
95 | { | ||
96 | int err; | ||
97 | |||
98 | err = cxgb3i_sdev_init(cxgb3i_cpl_handlers); | ||
99 | if (err < 0) | ||
100 | return err; | ||
101 | |||
102 | err = cxgb3i_iscsi_init(); | ||
103 | if (err < 0) | ||
104 | return err; | ||
105 | |||
106 | err = cxgb3i_pdu_init(); | ||
107 | if (err < 0) { | ||
108 | cxgb3i_iscsi_cleanup(); | ||
109 | return err; | ||
110 | } | ||
111 | |||
112 | cxgb3_register_client(&t3c_client); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver hba list and for each hba, release any resource held.
 * and unregisters iscsi transport and the cxgb3 module
 *
 * Steps run in exact reverse order of cxgb3i_init_module(): unhook from
 * the LLD first so no new adapters/events arrive during teardown.
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3c_client);
	cxgb3i_pdu_cleanup();
	cxgb3i_iscsi_cleanup();
	cxgb3i_sdev_cleanup();
}
130 | |||
131 | module_init(cxgb3i_init_module); | ||
132 | module_exit(cxgb3i_exit_module); | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c deleted file mode 100644 index 7b686abaae64..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ /dev/null | |||
@@ -1,1018 +0,0 @@ | |||
1 | /* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver. | ||
2 | * | ||
3 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
4 | * Copyright (c) 2008 Mike Christie | ||
5 | * Copyright (c) 2008 Red Hat, Inc. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * Written by: Karen Xie (kxie@chelsio.com) | ||
12 | */ | ||
13 | |||
14 | #include <linux/inet.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/crypto.h> | ||
17 | #include <linux/if_vlan.h> | ||
18 | #include <net/dst.h> | ||
19 | #include <net/tcp.h> | ||
20 | #include <scsi/scsi_cmnd.h> | ||
21 | #include <scsi/scsi_device.h> | ||
22 | #include <scsi/scsi_eh.h> | ||
23 | #include <scsi/scsi_host.h> | ||
24 | #include <scsi/scsi.h> | ||
25 | #include <scsi/iscsi_proto.h> | ||
26 | #include <scsi/libiscsi.h> | ||
27 | #include <scsi/scsi_transport_iscsi.h> | ||
28 | |||
29 | #include "cxgb3i.h" | ||
30 | #include "cxgb3i_pdu.h" | ||
31 | |||
32 | #ifdef __DEBUG_CXGB3I_TAG__ | ||
33 | #define cxgb3i_tag_debug cxgb3i_log_debug | ||
34 | #else | ||
35 | #define cxgb3i_tag_debug(fmt...) | ||
36 | #endif | ||
37 | |||
38 | #ifdef __DEBUG_CXGB3I_API__ | ||
39 | #define cxgb3i_api_debug cxgb3i_log_debug | ||
40 | #else | ||
41 | #define cxgb3i_api_debug(fmt...) | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * align pdu size to multiple of 512 for better performance | ||
46 | */ | ||
47 | #define align_pdu_size(n) do { n = (n) & (~511); } while (0) | ||
48 | |||
49 | static struct scsi_transport_template *cxgb3i_scsi_transport; | ||
50 | static struct scsi_host_template cxgb3i_host_template; | ||
51 | static struct iscsi_transport cxgb3i_iscsi_transport; | ||
52 | static unsigned char sw_tag_idx_bits; | ||
53 | static unsigned char sw_tag_age_bits; | ||
54 | |||
55 | static LIST_HEAD(cxgb3i_snic_list); | ||
56 | static DEFINE_RWLOCK(cxgb3i_snic_rwlock); | ||
57 | |||
58 | /** | ||
59 | * cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev | ||
60 | * @tdev: t3cdev pointer | ||
61 | */ | ||
62 | struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev) | ||
63 | { | ||
64 | struct cxgb3i_adapter *snic; | ||
65 | |||
66 | read_lock(&cxgb3i_snic_rwlock); | ||
67 | list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { | ||
68 | if (snic->tdev == tdev) { | ||
69 | read_unlock(&cxgb3i_snic_rwlock); | ||
70 | return snic; | ||
71 | } | ||
72 | } | ||
73 | read_unlock(&cxgb3i_snic_rwlock); | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
/* adapter_update - refresh the snic's ddp tag format and max pdu sizes
 * from the (possibly re-initialized) adapter; returns 0 or -errno */
static inline int adapter_update(struct cxgb3i_adapter *snic)
{
	cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n",
			snic, snic->tdev);
	return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format,
					&snic->tx_max_size,
					&snic->rx_max_size);
}
85 | |||
86 | static int adapter_add(struct cxgb3i_adapter *snic) | ||
87 | { | ||
88 | struct t3cdev *t3dev = snic->tdev; | ||
89 | struct adapter *adapter = tdev2adap(t3dev); | ||
90 | int i, err; | ||
91 | |||
92 | snic->pdev = adapter->pdev; | ||
93 | snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; | ||
94 | |||
95 | err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format, | ||
96 | &snic->tx_max_size, | ||
97 | &snic->rx_max_size); | ||
98 | if (err < 0) | ||
99 | return err; | ||
100 | |||
101 | for_each_port(adapter, i) { | ||
102 | snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]); | ||
103 | if (!snic->hba[i]) | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | snic->hba_cnt = adapter->params.nports; | ||
107 | |||
108 | /* add to the list */ | ||
109 | write_lock(&cxgb3i_snic_rwlock); | ||
110 | list_add_tail(&snic->list_head, &cxgb3i_snic_list); | ||
111 | write_unlock(&cxgb3i_snic_rwlock); | ||
112 | |||
113 | cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n", | ||
114 | t3dev, snic, snic->hba_cnt); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
/**
 * cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 *
 * If a snic already exists for this t3cdev (e.g. after an offload
 * reset), refresh it; otherwise allocate and register a new one. On
 * failure the partially set-up snic is torn down via
 * cxgb3i_adapter_close() (the RESET flag is cleared first so close
 * does not skip the teardown).
 */
void cxgb3i_adapter_open(struct t3cdev *t3dev)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
	int err;

	if (snic)
		err = adapter_update(snic);
	else {
		snic = kzalloc(sizeof(*snic), GFP_KERNEL);
		if (snic) {
			spin_lock_init(&snic->lock);
			snic->tdev = t3dev;
			err = adapter_add(snic);
		} else
			err = -ENOMEM;
	}

	if (err < 0) {
		cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
				 snic, snic ? snic->flags : 0, t3dev, err);
		if (snic) {
			snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET;
			cxgb3i_adapter_close(t3dev);
		}
	}
}
148 | |||
/**
 * cxgb3i_adapter_close - release the resources held and cleanup h/w settings
 * @t3dev: t3cdev adapter
 *
 * No-op when no snic is registered for this t3cdev or when the adapter
 * is in the middle of an offload reset (RESET flag set) — in that case
 * the snic is kept for the subsequent re-open.
 */
void cxgb3i_adapter_close(struct t3cdev *t3dev)
{
	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev);
	int i;

	if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) {
		cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n",
				t3dev, snic, snic ? snic->flags : 0);
		return;
	}

	/* remove from the list */
	write_lock(&cxgb3i_snic_rwlock);
	list_del(&snic->list_head);
	write_unlock(&cxgb3i_snic_rwlock);

	/* unregister every scsi host created by adapter_add() */
	for (i = 0; i < snic->hba_cnt; i++) {
		if (snic->hba[i]) {
			cxgb3i_hba_host_remove(snic->hba[i]);
			snic->hba[i] = NULL;
		}
	}
	cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n",
			t3dev, snic, snic->hba_cnt);
	kfree(snic);
}
179 | |||
180 | /** | ||
181 | * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device | ||
182 | * @t3dev: t3cdev adapter | ||
183 | */ | ||
184 | static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) | ||
185 | { | ||
186 | struct cxgb3i_adapter *snic; | ||
187 | int i; | ||
188 | |||
189 | if (ndev->priv_flags & IFF_802_1Q_VLAN) | ||
190 | ndev = vlan_dev_real_dev(ndev); | ||
191 | |||
192 | read_lock(&cxgb3i_snic_rwlock); | ||
193 | list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { | ||
194 | for (i = 0; i < snic->hba_cnt; i++) { | ||
195 | if (snic->hba[i]->ndev == ndev) { | ||
196 | read_unlock(&cxgb3i_snic_rwlock); | ||
197 | return snic->hba[i]; | ||
198 | } | ||
199 | } | ||
200 | } | ||
201 | read_unlock(&cxgb3i_snic_rwlock); | ||
202 | return NULL; | ||
203 | } | ||
204 | |||
/**
 * cxgb3i_hba_host_add - register a new host with scsi/iscsi
 * @snic: the cxgb3i adapter
 * @ndev: associated net_device
 *
 * Allocates a Scsi_Host with a cxgb3i_hba as its private data, fills in
 * the transport limits, pins the pci device and registers the host.
 * Returns the hba on success, NULL on failure (with the pci reference
 * and the host allocation released).
 */
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
				       struct net_device *ndev)
{
	struct cxgb3i_hba *hba;
	struct Scsi_Host *shost;
	int err;

	shost = iscsi_host_alloc(&cxgb3i_host_template,
				 sizeof(struct cxgb3i_hba), 1);
	if (!shost) {
		cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n",
				snic, ndev);
		return NULL;
	}

	shost->transportt = cxgb3i_scsi_transport;
	shost->max_lun = CXGB3I_MAX_LUN;
	shost->max_id = CXGB3I_MAX_TARGET;
	shost->max_channel = 0;
	shost->max_cmd_len = 16;

	hba = iscsi_host_priv(shost);
	hba->snic = snic;
	hba->ndev = ndev;
	hba->shost = shost;

	/* hold the pci device for the lifetime of the host; dropped in
	 * cxgb3i_hba_host_remove() or on the error path below */
	pci_dev_get(snic->pdev);
	err = iscsi_host_add(shost, &snic->pdev->dev);
	if (err) {
		cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n",
				snic, ndev);
		goto pci_dev_put;
	}

	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
			 shost, hba, shost->host_no);

	return hba;

pci_dev_put:
	pci_dev_put(snic->pdev);
	scsi_host_put(shost);
	return NULL;
}
254 | |||
/**
 * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
 * @hba: the cxgb3i hba
 *
 * Reverses cxgb3i_hba_host_add(): unregisters the host, drops the pci
 * reference taken at add time, then frees the host (and with it @hba,
 * which lives in the host's private data).
 */
void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
{
	cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
			 hba->shost, hba, hba->shost->host_no);
	iscsi_host_remove(hba->shost);
	pci_dev_put(hba->snic->pdev);
	iscsi_host_free(hba->shost);
}
267 | |||
/**
 * cxgb3i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host to use (may be NULL: any cxgb3i device is allowed)
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * Initiates a TCP/IP connection to the dst_addr. The connection must be
 * routed through a cxgb3i netdevice; when @shost is given, it must also
 * be the hba owning that netdevice. Returns the new endpoint or an
 * ERR_PTR; on any failure the offload connection is released.
 */
static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
						struct sockaddr *dst_addr,
						int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba = NULL;
	struct s3_conn *c3cn = NULL;
	int err = 0;

	if (shost)
		hba = iscsi_host_priv(shost);

	cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);

	c3cn = cxgb3i_c3cn_create();
	if (!c3cn) {
		cxgb3i_log_info("ep connect OOM.\n");
		err = -ENOMEM;
		goto release_conn;
	}

	/* hba may still be NULL here; the offload layer then picks the
	 * egress device by route lookup */
	err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
				 (struct sockaddr_in *)dst_addr);
	if (err < 0) {
		cxgb3i_log_info("ep connect failed.\n");
		goto release_conn;
	}

	/* the route must resolve to a netdevice owned by a cxgb3i hba */
	hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
	if (!hba) {
		err = -ENOSPC;
		cxgb3i_log_info("NOT going through cxgbi device.\n");
		goto release_conn;
	}

	/* if the caller pinned a host, the route must match it */
	if (shost && hba != iscsi_host_priv(shost)) {
		err = -ENOSPC;
		cxgb3i_log_info("Could not connect through request host%u\n",
				shost->host_no);
		goto release_conn;
	}

	/* connection may already be going down (e.g. peer reset) */
	if (c3cn_is_closing(c3cn)) {
		err = -ENOSPC;
		cxgb3i_log_info("ep connect unable to connect.\n");
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		cxgb3i_log_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}
	cep = ep->dd_data;
	cep->c3cn = c3cn;
	cep->hba = hba;

	cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
			  ep, cep, c3cn, hba);
	return ep;

release_conn:
	cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
	if (c3cn)
		cxgb3i_c3cn_release(c3cn);
	return ERR_PTR(err);
}
345 | |||
346 | /** | ||
347 | * cxgb3i_ep_poll - polls for TCP connection establishement | ||
348 | * @ep: TCP connection (endpoint) handle | ||
349 | * @timeout_ms: timeout value in milli secs | ||
350 | * | ||
351 | * polls for TCP connect request to complete | ||
352 | */ | ||
353 | static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | ||
354 | { | ||
355 | struct cxgb3i_endpoint *cep = ep->dd_data; | ||
356 | struct s3_conn *c3cn = cep->c3cn; | ||
357 | |||
358 | if (!c3cn_is_established(c3cn)) | ||
359 | return 0; | ||
360 | cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn); | ||
361 | return 1; | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * cxgb3i_ep_disconnect - teardown TCP connection | ||
366 | * @ep: TCP connection (endpoint) handle | ||
367 | * | ||
368 | * teardown TCP connection | ||
369 | */ | ||
static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgb3i_endpoint *cep = ep->dd_data;
	struct cxgb3i_conn *cconn = cep->cconn;

	cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);

	/* cconn is only set once this endpoint was bound to a conn */
	if (cconn && cconn->conn) {
		/*
		 * stop the xmit path so the xmit_pdu function is
		 * not being called
		 */
		iscsi_suspend_tx(cconn->conn);

		/* unhook the iscsi conn from the offload callbacks;
		 * callback_lock serializes this against the rx path */
		write_lock_bh(&cep->c3cn->callback_lock);
		cep->c3cn->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&cep->c3cn->callback_lock);
	}

	cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
			 ep, cep, cep->c3cn);
	/* tear down the tcp connection, then free the endpoint itself */
	cxgb3i_c3cn_release(cep->c3cn);
	iscsi_destroy_endpoint(ep);
}
395 | |||
396 | /** | ||
397 | * cxgb3i_session_create - create a new iscsi session | ||
398 | * @cmds_max: max # of commands | ||
399 | * @qdepth: scsi queue depth | ||
400 | * @initial_cmdsn: initial iscsi CMDSN for this session | ||
401 | * | ||
402 | * Creates a new iSCSI session | ||
403 | */ | ||
static struct iscsi_cls_session *
cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
		      u32 initial_cmdsn)
{
	struct cxgb3i_endpoint *cep;
	struct cxgb3i_hba *hba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	/* this transport requires a connected endpoint up front */
	if (!ep) {
		cxgb3i_log_error("%s, missing endpoint.\n", __func__);
		return NULL;
	}

	cep = ep->dd_data;
	hba = cep->hba;
	shost = hba->shost;
	cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
	BUG_ON(hba != iscsi_host_priv(shost));

	/* per-task data = libiscsi_tcp task + cxgb3i private area */
	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct cxgb3i_task_data),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;
	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
442 | |||
443 | /** | ||
444 | * cxgb3i_session_destroy - destroys iscsi session | ||
445 | * @cls_session: pointer to iscsi cls session | ||
446 | * | ||
447 | * Destroys an iSCSI session instance and releases its all resources held | ||
448 | */ | ||
449 | static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session) | ||
450 | { | ||
451 | cxgb3i_api_debug("sess 0x%p.\n", cls_session); | ||
452 | iscsi_tcp_r2tpool_free(cls_session->dd_data); | ||
453 | iscsi_session_teardown(cls_session); | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size | ||
458 | * @conn: iscsi connection | ||
459 | * check the max. xmit pdu payload, reduce it if needed | ||
460 | */ | ||
461 | static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn) | ||
462 | |||
463 | { | ||
464 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
465 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
466 | unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM); | ||
467 | |||
468 | max = min(cconn->hba->snic->tx_max_size, max); | ||
469 | if (conn->max_xmit_dlength) | ||
470 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); | ||
471 | else | ||
472 | conn->max_xmit_dlength = max; | ||
473 | align_pdu_size(conn->max_xmit_dlength); | ||
474 | cxgb3i_api_debug("conn 0x%p, max xmit %u.\n", | ||
475 | conn, conn->max_xmit_dlength); | ||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size | ||
481 | * @conn: iscsi connection | ||
482 | * return 0 if the value is valid, < 0 otherwise. | ||
483 | */ | ||
484 | static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn) | ||
485 | { | ||
486 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
487 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
488 | unsigned int max = cconn->hba->snic->rx_max_size; | ||
489 | |||
490 | align_pdu_size(max); | ||
491 | if (conn->max_recv_dlength) { | ||
492 | if (conn->max_recv_dlength > max) { | ||
493 | cxgb3i_log_error("MaxRecvDataSegmentLength %u too big." | ||
494 | " Need to be <= %u.\n", | ||
495 | conn->max_recv_dlength, max); | ||
496 | return -EINVAL; | ||
497 | } | ||
498 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); | ||
499 | align_pdu_size(conn->max_recv_dlength); | ||
500 | } else | ||
501 | conn->max_recv_dlength = max; | ||
502 | cxgb3i_api_debug("conn 0x%p, max recv %u.\n", | ||
503 | conn, conn->max_recv_dlength); | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * cxgb3i_conn_create - create iscsi connection instance | ||
509 | * @cls_session: pointer to iscsi cls session | ||
510 | * @cid: iscsi cid | ||
511 | * | ||
512 | * Creates a new iSCSI connection instance for a given session | ||
513 | */ | ||
514 | static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session | ||
515 | *cls_session, u32 cid) | ||
516 | { | ||
517 | struct iscsi_cls_conn *cls_conn; | ||
518 | struct iscsi_conn *conn; | ||
519 | struct iscsi_tcp_conn *tcp_conn; | ||
520 | struct cxgb3i_conn *cconn; | ||
521 | |||
522 | cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid); | ||
523 | |||
524 | cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); | ||
525 | if (!cls_conn) | ||
526 | return NULL; | ||
527 | conn = cls_conn->dd_data; | ||
528 | tcp_conn = conn->dd_data; | ||
529 | cconn = tcp_conn->dd_data; | ||
530 | |||
531 | cconn->conn = conn; | ||
532 | return cls_conn; | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together | ||
537 | * @cls_session: pointer to iscsi cls session | ||
538 | * @cls_conn: pointer to iscsi cls conn | ||
539 | * @transport_eph: 64-bit EP handle | ||
540 | * @is_leading: leading connection on this session? | ||
541 | * | ||
542 | * Binds together an iSCSI session, an iSCSI connection and a | ||
543 | * TCP connection. This routine returns error code if the TCP | ||
544 | * connection does not belong on the device iSCSI sess/conn is bound | ||
545 | */ | ||
546 | |||
547 | static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session, | ||
548 | struct iscsi_cls_conn *cls_conn, | ||
549 | u64 transport_eph, int is_leading) | ||
550 | { | ||
551 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
552 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
553 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
554 | struct cxgb3i_adapter *snic; | ||
555 | struct iscsi_endpoint *ep; | ||
556 | struct cxgb3i_endpoint *cep; | ||
557 | struct s3_conn *c3cn; | ||
558 | int err; | ||
559 | |||
560 | ep = iscsi_lookup_endpoint(transport_eph); | ||
561 | if (!ep) | ||
562 | return -EINVAL; | ||
563 | |||
564 | /* setup ddp pagesize */ | ||
565 | cep = ep->dd_data; | ||
566 | c3cn = cep->c3cn; | ||
567 | snic = cep->hba->snic; | ||
568 | err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0); | ||
569 | if (err < 0) | ||
570 | return err; | ||
571 | |||
572 | cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n", | ||
573 | ep, cls_session, cls_conn); | ||
574 | |||
575 | err = iscsi_conn_bind(cls_session, cls_conn, is_leading); | ||
576 | if (err) | ||
577 | return -EINVAL; | ||
578 | |||
579 | /* calculate the tag idx bits needed for this conn based on cmds_max */ | ||
580 | cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; | ||
581 | cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n", | ||
582 | conn->session->cmds_max, cconn->task_idx_bits); | ||
583 | |||
584 | read_lock(&c3cn->callback_lock); | ||
585 | c3cn->user_data = conn; | ||
586 | cconn->hba = cep->hba; | ||
587 | cconn->cep = cep; | ||
588 | cep->cconn = cconn; | ||
589 | read_unlock(&c3cn->callback_lock); | ||
590 | |||
591 | cxgb3i_conn_max_xmit_dlength(conn); | ||
592 | cxgb3i_conn_max_recv_dlength(conn); | ||
593 | |||
594 | spin_lock_bh(&conn->session->lock); | ||
595 | sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr); | ||
596 | conn->portal_port = ntohs(c3cn->daddr.sin_port); | ||
597 | spin_unlock_bh(&conn->session->lock); | ||
598 | |||
599 | /* init recv engine */ | ||
600 | iscsi_tcp_hdr_recv_prep(tcp_conn); | ||
601 | |||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | /** | ||
606 | * cxgb3i_conn_get_param - return iscsi connection parameter to caller | ||
607 | * @cls_conn: pointer to iscsi cls conn | ||
608 | * @param: parameter type identifier | ||
609 | * @buf: buffer pointer | ||
610 | * | ||
611 | * returns iSCSI connection parameters | ||
612 | */ | ||
613 | static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn, | ||
614 | enum iscsi_param param, char *buf) | ||
615 | { | ||
616 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
617 | int len; | ||
618 | |||
619 | cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param); | ||
620 | |||
621 | switch (param) { | ||
622 | case ISCSI_PARAM_CONN_PORT: | ||
623 | spin_lock_bh(&conn->session->lock); | ||
624 | len = sprintf(buf, "%hu\n", conn->portal_port); | ||
625 | spin_unlock_bh(&conn->session->lock); | ||
626 | break; | ||
627 | case ISCSI_PARAM_CONN_ADDRESS: | ||
628 | spin_lock_bh(&conn->session->lock); | ||
629 | len = sprintf(buf, "%s\n", conn->portal_address); | ||
630 | spin_unlock_bh(&conn->session->lock); | ||
631 | break; | ||
632 | default: | ||
633 | return iscsi_conn_get_param(cls_conn, param, buf); | ||
634 | } | ||
635 | |||
636 | return len; | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * cxgb3i_conn_set_param - set iscsi connection parameter | ||
641 | * @cls_conn: pointer to iscsi cls conn | ||
642 | * @param: parameter type identifier | ||
643 | * @buf: buffer pointer | ||
644 | * @buflen: buffer length | ||
645 | * | ||
646 | * set iSCSI connection parameters | ||
647 | */ | ||
648 | static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn, | ||
649 | enum iscsi_param param, char *buf, int buflen) | ||
650 | { | ||
651 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
652 | struct iscsi_session *session = conn->session; | ||
653 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
654 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
655 | struct cxgb3i_adapter *snic = cconn->hba->snic; | ||
656 | struct s3_conn *c3cn = cconn->cep->c3cn; | ||
657 | int value, err = 0; | ||
658 | |||
659 | switch (param) { | ||
660 | case ISCSI_PARAM_HDRDGST_EN: | ||
661 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
662 | if (!err && conn->hdrdgst_en) | ||
663 | err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, | ||
664 | conn->hdrdgst_en, | ||
665 | conn->datadgst_en, 0); | ||
666 | break; | ||
667 | case ISCSI_PARAM_DATADGST_EN: | ||
668 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
669 | if (!err && conn->datadgst_en) | ||
670 | err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, | ||
671 | conn->hdrdgst_en, | ||
672 | conn->datadgst_en, 0); | ||
673 | break; | ||
674 | case ISCSI_PARAM_MAX_R2T: | ||
675 | sscanf(buf, "%d", &value); | ||
676 | if (value <= 0 || !is_power_of_2(value)) | ||
677 | return -EINVAL; | ||
678 | if (session->max_r2t == value) | ||
679 | break; | ||
680 | iscsi_tcp_r2tpool_free(session); | ||
681 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
682 | if (!err && iscsi_tcp_r2tpool_alloc(session)) | ||
683 | return -ENOMEM; | ||
684 | case ISCSI_PARAM_MAX_RECV_DLENGTH: | ||
685 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
686 | if (!err) | ||
687 | err = cxgb3i_conn_max_recv_dlength(conn); | ||
688 | break; | ||
689 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: | ||
690 | err = iscsi_set_param(cls_conn, param, buf, buflen); | ||
691 | if (!err) | ||
692 | err = cxgb3i_conn_max_xmit_dlength(conn); | ||
693 | break; | ||
694 | default: | ||
695 | return iscsi_set_param(cls_conn, param, buf, buflen); | ||
696 | } | ||
697 | return err; | ||
698 | } | ||
699 | |||
700 | /** | ||
701 | * cxgb3i_host_set_param - configure host (adapter) related parameters | ||
702 | * @shost: scsi host pointer | ||
703 | * @param: parameter type identifier | ||
704 | * @buf: buffer pointer | ||
705 | */ | ||
706 | static int cxgb3i_host_set_param(struct Scsi_Host *shost, | ||
707 | enum iscsi_host_param param, | ||
708 | char *buf, int buflen) | ||
709 | { | ||
710 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); | ||
711 | |||
712 | if (!hba->ndev) { | ||
713 | shost_printk(KERN_ERR, shost, "Could not set host param. " | ||
714 | "Netdev for host not set.\n"); | ||
715 | return -ENODEV; | ||
716 | } | ||
717 | |||
718 | cxgb3i_api_debug("param %d, buf %s.\n", param, buf); | ||
719 | |||
720 | switch (param) { | ||
721 | case ISCSI_HOST_PARAM_IPADDRESS: | ||
722 | { | ||
723 | __be32 addr = in_aton(buf); | ||
724 | cxgb3i_set_private_ipv4addr(hba->ndev, addr); | ||
725 | return 0; | ||
726 | } | ||
727 | case ISCSI_HOST_PARAM_HWADDRESS: | ||
728 | case ISCSI_HOST_PARAM_NETDEV_NAME: | ||
729 | /* ignore */ | ||
730 | return 0; | ||
731 | default: | ||
732 | return iscsi_host_set_param(shost, param, buf, buflen); | ||
733 | } | ||
734 | } | ||
735 | |||
736 | /** | ||
737 | * cxgb3i_host_get_param - returns host (adapter) related parameters | ||
738 | * @shost: scsi host pointer | ||
739 | * @param: parameter type identifier | ||
740 | * @buf: buffer pointer | ||
741 | */ | ||
742 | static int cxgb3i_host_get_param(struct Scsi_Host *shost, | ||
743 | enum iscsi_host_param param, char *buf) | ||
744 | { | ||
745 | struct cxgb3i_hba *hba = iscsi_host_priv(shost); | ||
746 | int len = 0; | ||
747 | |||
748 | if (!hba->ndev) { | ||
749 | shost_printk(KERN_ERR, shost, "Could not set host param. " | ||
750 | "Netdev for host not set.\n"); | ||
751 | return -ENODEV; | ||
752 | } | ||
753 | |||
754 | cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); | ||
755 | |||
756 | switch (param) { | ||
757 | case ISCSI_HOST_PARAM_HWADDRESS: | ||
758 | len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6); | ||
759 | break; | ||
760 | case ISCSI_HOST_PARAM_NETDEV_NAME: | ||
761 | len = sprintf(buf, "%s\n", hba->ndev->name); | ||
762 | break; | ||
763 | case ISCSI_HOST_PARAM_IPADDRESS: | ||
764 | { | ||
765 | __be32 addr; | ||
766 | |||
767 | addr = cxgb3i_get_private_ipv4addr(hba->ndev); | ||
768 | len = sprintf(buf, "%pI4", &addr); | ||
769 | break; | ||
770 | } | ||
771 | default: | ||
772 | return iscsi_host_get_param(shost, param, buf); | ||
773 | } | ||
774 | return len; | ||
775 | } | ||
776 | |||
777 | /** | ||
778 | * cxgb3i_conn_get_stats - returns iSCSI stats | ||
779 | * @cls_conn: pointer to iscsi cls conn | ||
780 | * @stats: pointer to iscsi statistic struct | ||
781 | */ | ||
782 | static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn, | ||
783 | struct iscsi_stats *stats) | ||
784 | { | ||
785 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
786 | |||
787 | stats->txdata_octets = conn->txdata_octets; | ||
788 | stats->rxdata_octets = conn->rxdata_octets; | ||
789 | stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; | ||
790 | stats->dataout_pdus = conn->dataout_pdus_cnt; | ||
791 | stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; | ||
792 | stats->datain_pdus = conn->datain_pdus_cnt; | ||
793 | stats->r2t_pdus = conn->r2t_pdus_cnt; | ||
794 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; | ||
795 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; | ||
796 | stats->digest_err = 0; | ||
797 | stats->timeout_err = 0; | ||
798 | stats->custom_length = 1; | ||
799 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); | ||
800 | stats->custom[0].value = conn->eh_abort_cnt; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * cxgb3i_parse_itt - get the idx and age bits from a given tag | ||
805 | * @conn: iscsi connection | ||
806 | * @itt: itt tag | ||
807 | * @idx: task index, filled in by this function | ||
808 | * @age: session age, filled in by this function | ||
809 | */ | ||
810 | static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt, | ||
811 | int *idx, int *age) | ||
812 | { | ||
813 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
814 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
815 | struct cxgb3i_adapter *snic = cconn->hba->snic; | ||
816 | u32 tag = ntohl((__force u32) itt); | ||
817 | u32 sw_bits; | ||
818 | |||
819 | sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag); | ||
820 | if (idx) | ||
821 | *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); | ||
822 | if (age) | ||
823 | *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; | ||
824 | |||
825 | cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n", | ||
826 | tag, itt, sw_bits, idx ? *idx : 0xFFFFF, | ||
827 | age ? *age : 0xFF); | ||
828 | } | ||
829 | |||
830 | /** | ||
831 | * cxgb3i_reserve_itt - generate tag for a give task | ||
832 | * @task: iscsi task | ||
833 | * @hdr_itt: tag, filled in by this function | ||
834 | * Set up ddp for scsi read tasks if possible. | ||
835 | */ | ||
int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct cxgb3i_adapter *snic = cconn->hba->snic;
	struct cxgb3i_tag_format *tformat = &snic->tag_format;
	/* sw tag layout: session age in the high bits, task itt low */
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag;
	int err = -EINVAL;

	/* attempt hw ddp only for scsi reads (incl. bidi) whose sw tag
	 * fits into the adapter's tag format */
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgb3i_sw_tag_usable(tformat, sw_tag)) {
		struct s3_conn *c3cn = cconn->cep->c3cn;
		struct cxgb3i_gather_list *gl;

		/* build a dma gather list over the read buffer */
		gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					snic->pdev,
					GFP_ATOMIC);
		if (gl) {
			tag = sw_tag;
			err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
						     tformat, &tag,
						     gl, GFP_ATOMIC);
			if (err < 0)
				/* reservation failed: undo the gl */
				cxgb3i_ddp_release_gl(gl, snic->pdev);
		}
	}

	/* no ddp: encode the sw tag as a plain (non-ddp) tag instead */
	if (err < 0)
		tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
			 tag, *hdr_itt, task->itt, sess->age);
	return 0;
}
879 | |||
880 | /** | ||
881 | * cxgb3i_release_itt - release the tag for a given task | ||
882 | * @task: iscsi task | ||
883 | * @hdr_itt: tag | ||
884 | * If the tag is a ddp tag, release the ddp setup | ||
885 | */ | ||
886 | void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt) | ||
887 | { | ||
888 | struct scsi_cmnd *sc = task->sc; | ||
889 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | ||
890 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
891 | struct cxgb3i_adapter *snic = cconn->hba->snic; | ||
892 | struct cxgb3i_tag_format *tformat = &snic->tag_format; | ||
893 | u32 tag = ntohl((__force u32)hdr_itt); | ||
894 | |||
895 | cxgb3i_tag_debug("release tag 0x%x.\n", tag); | ||
896 | |||
897 | if (sc && | ||
898 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && | ||
899 | cxgb3i_is_ddp_tag(tformat, tag)) | ||
900 | cxgb3i_ddp_tag_release(snic->tdev, tag); | ||
901 | } | ||
902 | |||
903 | /** | ||
904 | * cxgb3i_host_template -- Scsi_Host_Template structure | ||
905 | * used when registering with the scsi mid layer | ||
906 | */ | ||
static struct scsi_host_template cxgb3i_host_template = {
	.module = THIS_MODULE,
	.name = "Chelsio S3xx iSCSI Initiator",
	.proc_name = "cxgb3i",
	/* command submission and queue-depth changes go through libiscsi */
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.can_queue = CXGB3I_SCSI_HOST_QDEPTH,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	/* error recovery is delegated to the generic iscsi handlers */
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.use_clustering = DISABLE_CLUSTERING,
	.this_id = -1,
};
924 | |||
static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = "cxgb3i",
	/* digest and padding are computed by the adapter (offloaded) */
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
		| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
		ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
	.get_host_param = cxgb3i_host_get_param,
	.set_host_param = cxgb3i_host_set_param,
	/* session management */
	.create_session = cxgb3i_session_create,
	.destroy_session = cxgb3i_session_destroy,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgb3i_conn_create,
	.bind_conn = cxgb3i_conn_bind,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = cxgb3i_conn_get_param,
	.set_param = cxgb3i_conn_set_param,
	.get_stats = cxgb3i_conn_get_stats,
	/* pdu xmit req. from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgb3i_conn_cleanup_task,

	/* pdu */
	.alloc_pdu = cxgb3i_conn_alloc_pdu,
	.init_pdu = cxgb3i_conn_init_pdu,
	.xmit_pdu = cxgb3i_conn_xmit_pdu,
	.parse_pdu_itt = cxgb3i_parse_itt,

	/* TCP connect/disconnect */
	.ep_connect = cxgb3i_ep_connect,
	.ep_poll = cxgb3i_ep_poll,
	.ep_disconnect = cxgb3i_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
992 | |||
993 | int cxgb3i_iscsi_init(void) | ||
994 | { | ||
995 | sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; | ||
996 | sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; | ||
997 | cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", | ||
998 | ISCSI_ITT_MASK, sw_tag_idx_bits, | ||
999 | ISCSI_AGE_MASK, sw_tag_age_bits); | ||
1000 | |||
1001 | cxgb3i_scsi_transport = | ||
1002 | iscsi_register_transport(&cxgb3i_iscsi_transport); | ||
1003 | if (!cxgb3i_scsi_transport) { | ||
1004 | cxgb3i_log_error("Could not register cxgb3i transport.\n"); | ||
1005 | return -ENODEV; | ||
1006 | } | ||
1007 | cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport); | ||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | void cxgb3i_iscsi_cleanup(void) | ||
1012 | { | ||
1013 | if (cxgb3i_scsi_transport) { | ||
1014 | cxgb3i_api_debug("cxgb3i transport 0x%p.\n", | ||
1015 | cxgb3i_scsi_transport); | ||
1016 | iscsi_unregister_transport(&cxgb3i_iscsi_transport); | ||
1017 | } | ||
1018 | } | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c deleted file mode 100644 index 3ee13cf9556b..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ /dev/null | |||
@@ -1,1944 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management | ||
3 | * | ||
4 | * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
7 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
8 | * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this | ||
9 | * release for licensing terms and conditions. | ||
10 | * | ||
11 | * Written by: Dimitris Michailidis (dm@chelsio.com) | ||
12 | * Karen Xie (kxie@chelsio.com) | ||
13 | */ | ||
14 | |||
15 | #include <linux/if_vlan.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/version.h> | ||
18 | |||
19 | #include "cxgb3_defs.h" | ||
20 | #include "cxgb3_ctl_defs.h" | ||
21 | #include "firmware_exports.h" | ||
22 | #include "cxgb3i_offload.h" | ||
23 | #include "cxgb3i_pdu.h" | ||
24 | #include "cxgb3i_ddp.h" | ||
25 | |||
/*
 * Per-area debug logging.  Each c3cn_*_debug macro expands to a real
 * cxgb3i_log_debug() call only when the matching __DEBUG_C3CN_xxx__
 * switch is defined at build time; otherwise it expands to nothing and
 * the arguments are never evaluated.
 */
#ifdef __DEBUG_C3CN_CONN__
#define c3cn_conn_debug cxgb3i_log_debug
#else
#define c3cn_conn_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_TX__
#define c3cn_tx_debug cxgb3i_log_debug
#else
#define c3cn_tx_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_RX__
#define c3cn_rx_debug cxgb3i_log_debug
#else
#define c3cn_rx_debug(fmt...)
#endif
43 | |||
44 | /* | ||
45 | * module parameters releated to offloaded iscsi connection | ||
46 | */ | ||
47 | static int cxgb3_rcv_win = 256 * 1024; | ||
48 | module_param(cxgb3_rcv_win, int, 0644); | ||
49 | MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)"); | ||
50 | |||
51 | static int cxgb3_snd_win = 128 * 1024; | ||
52 | module_param(cxgb3_snd_win, int, 0644); | ||
53 | MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)"); | ||
54 | |||
55 | static int cxgb3_rx_credit_thres = 10 * 1024; | ||
56 | module_param(cxgb3_rx_credit_thres, int, 0644); | ||
57 | MODULE_PARM_DESC(rx_credit_thres, | ||
58 | "RX credits return threshold in bytes (default=10KB)"); | ||
59 | |||
60 | static unsigned int cxgb3_max_connect = 8 * 1024; | ||
61 | module_param(cxgb3_max_connect, uint, 0644); | ||
62 | MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8092)"); | ||
63 | |||
64 | static unsigned int cxgb3_sport_base = 20000; | ||
65 | module_param(cxgb3_sport_base, uint, 0644); | ||
66 | MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)"); | ||
67 | |||
/*
 * cxgb3i tcp connection data (per adapter) list.
 * cdata_list holds the per-adapter offload data structures; reads and
 * updates of the list are serialized by cdata_rwlock.
 */
static LIST_HEAD(cdata_list);
static DEFINE_RWLOCK(cdata_rwlock);

/* forward declarations: tx kick and teardown used before definition */
static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
static void c3cn_release_offload_resources(struct s3_conn *c3cn);
76 | |||
77 | /* | ||
78 | * iscsi source port management | ||
79 | * | ||
80 | * Find a free source port in the port allocation map. We use a very simple | ||
81 | * rotor scheme to look for the next free port. | ||
82 | * | ||
83 | * If a source port has been specified make sure that it doesn't collide with | ||
84 | * our normal source port allocation map. If it's outside the range of our | ||
85 | * allocation/deallocation scheme just let them use it. | ||
86 | * | ||
87 | * If the source port is outside our allocation range, the caller is | ||
88 | * responsible for keeping track of their port usage. | ||
89 | */ | ||
/*
 * c3cn_get_port - reserve a free iscsi source port for @c3cn.
 * @c3cn:  connection needing a source port
 * @cdata: per-adapter data holding the port map
 *
 * Rotor scan of cdata->sport_conn[], resuming after the last slot handed
 * out; on success stores cxgb3_sport_base + idx (network order) in
 * c3cn->saddr.sin_port and records @c3cn in the slot.
 *
 * Returns 0 on success, -EADDRINUSE if the connection already has a
 * port, -EADDRNOTAVAIL if @cdata is NULL or all slots are taken.
 */
static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
{
	unsigned int start;
	int idx;

	if (!cdata)
		goto error_out;

	/* caller must not have picked a source port already */
	if (c3cn->saddr.sin_port) {
		cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n",
				 c3cn->saddr.sin_port);
		return -EADDRINUSE;
	}

	spin_lock_bh(&cdata->lock);
	start = idx = cdata->sport_next;
	do {
		if (++idx >= cxgb3_max_connect)
			idx = 0;
		if (!cdata->sport_conn[idx]) {
			/* claim the slot before dropping the lock */
			c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
			cdata->sport_next = idx;
			cdata->sport_conn[idx] = c3cn;
			spin_unlock_bh(&cdata->lock);

			c3cn_conn_debug("%s reserve port %u.\n",
					cdata->cdev->name,
					cxgb3_sport_base + idx);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&cdata->lock);

error_out:
	return -EADDRNOTAVAIL;
}
126 | |||
/*
 * c3cn_put_port - return the connection's source port to the free pool.
 *
 * Clears c3cn->saddr.sin_port and releases the matching sport_conn[]
 * slot.  Ports outside [cxgb3_sport_base, cxgb3_sport_base +
 * cxgb3_max_connect) were never tracked here, so they are only cleared.
 */
static void c3cn_put_port(struct s3_conn *c3cn)
{
	if (!c3cn->cdev)
		return;

	if (c3cn->saddr.sin_port) {
		struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
		int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;

		c3cn->saddr.sin_port = 0;
		if (idx < 0 || idx >= cxgb3_max_connect)
			return;
		spin_lock_bh(&cdata->lock);
		cdata->sport_conn[idx] = NULL;
		spin_unlock_bh(&cdata->lock);
		c3cn_conn_debug("%s, release port %u.\n",
				cdata->cdev->name, cxgb3_sport_base + idx);
	}
}
146 | |||
/* Set a connection flag (non-atomic bit op) and log the new state. */
static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__set_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}
153 | |||
/* Clear a connection flag (non-atomic bit op) and log the new state. */
static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__clear_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}
160 | |||
161 | static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag) | ||
162 | { | ||
163 | if (c3cn == NULL) | ||
164 | return 0; | ||
165 | return test_bit(flag, &c3cn->flags); | ||
166 | } | ||
167 | |||
/* Move the connection to a new state, logging the transition. */
static void c3cn_set_state(struct s3_conn *c3cn, int state)
{
	c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
	c3cn->state = state;
}
173 | |||
/* Take a reference on the connection. */
static inline void c3cn_hold(struct s3_conn *c3cn)
{
	atomic_inc(&c3cn->refcnt);
}
178 | |||
/* Drop a reference; the final put frees the c3cn itself. */
static inline void c3cn_put(struct s3_conn *c3cn)
{
	if (atomic_dec_and_test(&c3cn->refcnt)) {
		c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
				c3cn, c3cn->state, c3cn->flags);
		kfree(c3cn);
	}
}
187 | |||
/*
 * Final close path: give back the source port, release offload
 * resources, mark the connection CLOSED, and tell the iscsi layer the
 * connection is going away.
 */
static void c3cn_closed(struct s3_conn *c3cn)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn_put_port(c3cn);
	c3cn_release_offload_resources(c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	cxgb3i_conn_closing(c3cn);
}
198 | |||
199 | /* | ||
200 | * CPL (Chelsio Protocol Language) defines a message passing interface between | ||
201 | * the host driver and T3 asic. | ||
202 | * The section below implments CPLs that related to iscsi tcp connection | ||
203 | * open/close/abort and data send/receive. | ||
204 | */ | ||
205 | |||
206 | /* | ||
207 | * CPL connection active open request: host -> | ||
208 | */ | ||
209 | static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu) | ||
210 | { | ||
211 | int i = 0; | ||
212 | |||
213 | while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu) | ||
214 | ++i; | ||
215 | return i; | ||
216 | } | ||
217 | |||
/*
 * Pick the adapter MTU-table index best matching the path MTU @pmtu,
 * after clamping the route's advertised MSS into the usable range.
 * The constant 40 is presumably the IPv4 + TCP header overhead --
 * NOTE(review): confirm against the MTU table semantics.
 */
static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = c3cn->dst_cache;
	struct t3cdev *cdev = c3cn->cdev;
	const struct t3c_data *td = T3C_DATA(cdev);
	u16 advmss = dst_metric(dst, RTAX_ADVMSS);

	if (advmss > pmtu - 40)
		advmss = pmtu - 40;
	if (advmss < td->mtus[0] - 40)
		advmss = td->mtus[0] - 40;
	idx = find_best_mtu(td, advmss + 40);
	return idx;
}
233 | |||
/*
 * Compute the TCP window-scale shift needed so a 16-bit window field
 * (max 65535) can cover @win bytes; capped at 14, the largest scale
 * factor the protocol permits.
 */
static inline int compute_wscale(int win)
{
	int shift;

	for (shift = 0; shift < 14 && (65535 << shift) < win; shift++)
		;
	return shift;
}
241 | |||
/* Build the high word of TCB option 0: keepalive on, TCAM bypass,
 * window scale for our receive window, and the chosen MSS index. */
static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
{
	int wscale = compute_wscale(cxgb3_rcv_win);
	return V_KEEP_ALIVE(1) |
		F_TCAM_BYPASS |
		V_WND_SCALE(wscale) |
		V_MSS_IDX(c3cn->mss_idx);
}
250 | |||
/* Build the low word of TCB option 0: iSCSI ULP mode and the receive
 * buffer size (hardware field is in 1KB units, hence the >> 10). */
static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
{
	return V_ULP_MODE(ULP_MODE_ISCSI) |
		V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
}
256 | |||
/*
 * Fill @skb with a CPL_ACT_OPEN_REQ for @c3cn using active-open tid
 * @atid and L2 table entry @e.  Addresses/ports are copied already in
 * network byte order from the connection.
 */
static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
			      unsigned int atid, const struct l2t_entry *e)
{
	struct cpl_act_open_req *req;

	c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
	req->local_port = c3cn->saddr.sin_port;
	req->peer_port = c3cn->daddr.sin_port;
	req->local_ip = c3cn->saddr.sin_addr.s_addr;
	req->peer_ip = c3cn->daddr.sin_addr.s_addr;
	req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
			   V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(calc_opt0l(c3cn));
	req->params = 0;
	req->opt2 = 0;
}
279 | |||
/* Fail a pending active open: record @errno on the connection and run
 * the full close path.  Caller holds the connection lock. */
static void fail_act_open(struct s3_conn *c3cn, int errno)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	c3cn->err = errno;
	c3cn_closed(c3cn);
}
287 | |||
/*
 * ARP resolution failed for an active-open request: fail the connection
 * with -EHOSTUNREACH (only if it is still CONNECTING) and free the skb.
 * The c3cn pointer was stashed in skb->sk when the request was built.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct s3_conn *c3cn = (struct s3_conn *)skb->sk;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	/* hold a ref across the state change; fail_act_open may drop ours */
	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);
	if (c3cn->state == C3CN_STATE_CONNECTING)
		fail_act_open(c3cn, -EHOSTUNREACH);
	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
	__kfree_skb(skb);
}
302 | |||
303 | /* | ||
304 | * CPL connection close request: host -> | ||
305 | * | ||
306 | * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to | ||
307 | * the write queue (i.e., after any unsent txt data). | ||
308 | */ | ||
/* Tag @skb with the current write sequence and @flags, then append it
 * to the connection's unsent write queue. */
static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
		       int flags)
{
	skb_tcp_seq(skb) = c3cn->write_seq;
	skb_flags(skb) = flags;
	__skb_queue_tail(&c3cn->write_queue, skb);
}
316 | |||
/*
 * Queue a CPL_CLOSE_CON_REQ behind any unsent tx data and kick the
 * transmit path.  Consumes the pre-allocated c3cn->cpl_close skb
 * (set to NULL so it cannot be reused).
 */
static void send_close_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = c3cn->tid;

	c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->cpl_close = NULL;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(c3cn->write_seq);

	/* NO_APPEND: nothing may be coalesced after the FIN */
	skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
	if (c3cn->state != C3CN_STATE_CONNECTING)
		c3cn_push_tx_frames(c3cn, 1);
}
337 | |||
338 | /* | ||
339 | * CPL connection abort request: host -> | ||
340 | * | ||
341 | * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs | ||
342 | * for the same connection and also that we do not try to send a message | ||
343 | * after the connection has closed. | ||
344 | */ | ||
/*
 * ARP failure handler for an ABORT_REQ: still deliver the abort to the
 * chip, but downgrade it to CPL_ABORT_NO_RST so no RST is emitted.
 */
static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	c3cn_conn_debug("tdev 0x%p.\n", cdev);

	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(cdev, skb);
}
354 | |||
355 | static inline void c3cn_purge_write_queue(struct s3_conn *c3cn) | ||
356 | { | ||
357 | struct sk_buff *skb; | ||
358 | |||
359 | while ((skb = __skb_dequeue(&c3cn->write_queue))) | ||
360 | __kfree_skb(skb); | ||
361 | } | ||
362 | |||
/*
 * Send a CPL_ABORT_REQ for @c3cn.  Guarded so only one abort is ever
 * sent per connection (state check) and nothing is sent after teardown
 * (skb/cdev checks).  Consumes the pre-allocated cpl_abort_req skb.
 */
static void send_abort_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_abort_req;
	struct cpl_abort_req *req;
	unsigned int tid = c3cn->tid;

	if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
		     !c3cn->cdev)
		return;

	c3cn_set_state(c3cn, C3CN_STATE_ABORTING);

	c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);

	c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);

	/* Purge the send queue so we don't send anything after an abort. */
	c3cn_purge_write_queue(c3cn);

	c3cn->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	memset(req, 0, sizeof(*req));

	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(c3cn->snd_nxt);
	/* rsvd1 != 0 tells hw no TX_DATA_WR was ever sent on this tid */
	req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	l2t_send(c3cn->cdev, skb, c3cn->l2t);
}
398 | |||
399 | /* | ||
400 | * CPL connection abort reply: host -> | ||
401 | * | ||
402 | * Send an ABORT_RPL message in response of the ABORT_REQ received. | ||
403 | */ | ||
/*
 * Reply to a peer ABORT_REQ with a CPL_ABORT_RPL carrying @rst_status.
 * Consumes the pre-allocated cpl_abort_rpl skb.
 */
static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
{
	struct sk_buff *skb = c3cn->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	c3cn->cpl_abort_rpl = NULL;

	skb->priority = CPL_PRIORITY_DATA;
	memset(rpl, 0, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
	rpl->cmd = rst_status;

	cxgb3_ofld_send(c3cn->cdev, skb);
}
420 | |||
421 | /* | ||
422 | * CPL connection rx data ack: host -> | ||
423 | * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of | ||
424 | * credits sent. | ||
425 | */ | ||
/*
 * Return RX credits to the hardware via CPL_RX_DATA_ACK.
 * @credits: credits to return; @dack: delayed-ack control bits.
 * Returns the number of credits actually sent (0 on allocation failure).
 */
static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;

	req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(c3cn->cdev, skb);
	return credits;
}
444 | |||
445 | /* | ||
446 | * CPL connection tx data: host -> | ||
447 | * | ||
448 | * Send iscsi PDU via TX_DATA CPL message. Returns the number of | ||
449 | * credits sent. | ||
450 | * Each TX_DATA consumes work request credit (wrs), so we need to keep track of | ||
451 | * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). | ||
452 | */ | ||
453 | |||
454 | /* | ||
455 | * For ULP connections HW may inserts digest bytes into the pdu. Those digest | ||
456 | * bytes are not sent by the host but are part of the TCP payload and therefore | ||
457 | * consume TCP sequence space. | ||
458 | */ | ||
459 | static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 }; | ||
460 | static inline unsigned int ulp_extra_len(const struct sk_buff *skb) | ||
461 | { | ||
462 | return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3]; | ||
463 | } | ||
464 | |||
/* max payload bytes per work request, set by s3_init_wr_tab() */
static unsigned int wrlen __read_mostly;

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
475 | |||
/*
 * Populate the skb_wrs[] lookup table (#fragments -> #WRs needed) for a
 * WR capacity of @wr_len, and record wrlen = wr_len * 8 (wr_len appears
 * to be in 8-byte units -- NOTE(review): confirm the unit).  Idempotent:
 * a non-zero skb_wrs[1] means the table was already built.
 */
static void s3_init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;

	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}

	wrlen = wr_len * 8;
}
493 | |||
/* Empty the connection's pending-WR FIFO (head and tail to NULL). */
static inline void reset_wr_list(struct s3_conn *c3cn)
{
	c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
}
498 | |||
499 | /* | ||
500 | * Add a WR to a connections's list of pending WRs. This is a singly-linked | ||
501 | * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head | ||
502 | * and the tail in wr_pending_tail. | ||
503 | */ | ||
/* Append @skb to the connection's singly-linked pending-WR FIFO. */
static inline void enqueue_wr(struct s3_conn *c3cn,
			      struct sk_buff *skb)
{
	skb_tx_wr_next(skb) = NULL;

	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!c3cn->wr_pending_head)
		c3cn->wr_pending_head = skb;
	else
		skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
	c3cn->wr_pending_tail = skb;
}
523 | |||
524 | static int count_pending_wrs(struct s3_conn *c3cn) | ||
525 | { | ||
526 | int n = 0; | ||
527 | const struct sk_buff *skb = c3cn->wr_pending_head; | ||
528 | |||
529 | while (skb) { | ||
530 | n += skb->csum; | ||
531 | skb = skb_tx_wr_next(skb); | ||
532 | } | ||
533 | return n; | ||
534 | } | ||
535 | |||
/* Look at the oldest pending WR without removing it from the FIFO. */
static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
{
	return c3cn->wr_pending_head;
}
540 | |||
/* Release one reference on a WR skb (paired with enqueue_wr's extra ref). */
static inline void free_wr_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}
545 | |||
/* Remove and return the oldest pending WR, or NULL if the FIFO is empty. */
static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->wr_pending_head;

	if (likely(skb)) {
		/* Don't bother clearing the tail */
		c3cn->wr_pending_head = skb_tx_wr_next(skb);
		skb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
557 | |||
558 | static void purge_wr_queue(struct s3_conn *c3cn) | ||
559 | { | ||
560 | struct sk_buff *skb; | ||
561 | while ((skb = dequeue_wr(c3cn)) != NULL) | ||
562 | free_wr_skb(skb); | ||
563 | } | ||
564 | |||
/*
 * Prepend a TX_DATA work request header to @skb.
 * @len: payload length including any HW ULP digest additions.
 * The first WR on a connection additionally carries the init flags and
 * send-buffer size, and latches C3CN_TX_DATA_SENT.
 */
static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;

	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
	req->sndseq = htonl(c3cn->snd_nxt);
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));

	if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(c3cn->qset));
		/* Sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
		c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
	}
}
591 | |||
592 | /** | ||
593 | * c3cn_push_tx_frames -- start transmit | ||
594 | * @c3cn: the offloaded connection | ||
595 | * @req_completion: request wr_ack or not | ||
596 | * | ||
597 | * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a | ||
598 | * connection's send queue and sends them on to T3. Must be called with the | ||
599 | * connection's lock held. Returns the amount of send buffer space that was | ||
600 | * freed as a result of sending queued data to T3. | ||
601 | */ | ||
/* ARP failure handler that simply drops the skb (used for tx data). */
static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
{
	kfree_skb(skb);
}
606 | |||
/*
 * Drain the connection's write queue into the hardware, prepending a
 * TX_DATA_WR to each skb that needs one, while WR credits last.  Must
 * be called with the connection lock held (see kernel-doc above).
 * Returns the total truesize of the skbs handed to the hardware.
 */
static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;
	struct t3cdev *cdev;
	struct cxgb3i_sdev_data *cdata;

	/* nothing may be sent while connecting, half-closed, or aborting */
	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
		     c3cn->state >= C3CN_STATE_ABORTING)) {
		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
			      c3cn, c3cn->state);
		return 0;
	}

	cdev = c3cn->cdev;
	cdata = CXGB3_SDEV_DATA(cdev);

	while (c3cn->wr_avail
	       && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		/* small payload fits in an immediate-data WR */
		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (c3cn->wr_avail < wrs_needed) {
			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
				      "wr %d < %u.\n",
				      c3cn, skb->len, skb->data_len, frags,
				      wrs_needed, c3cn->wr_avail);
			break;
		}

		__skb_unlink(skb, &c3cn->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		c3cn->wr_avail -= wrs_needed;
		c3cn->wr_unacked += wrs_needed;
		enqueue_wr(c3cn, skb);

		c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
				"wr %d, left %u, unack %u.\n",
				c3cn, skb->len, skb->data_len, frags,
				wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);


		if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
			/* request a completion once half the WR credits
			 * are outstanding, so acks keep flowing back */
			if ((req_completion &&
			     c3cn->wr_unacked == wrs_needed) ||
			    (skb_flags(skb) & C3CB_FLAG_COMPL) ||
			    c3cn->wr_unacked >= c3cn->wr_max / 2) {
				req_completion = 1;
				c3cn->wr_unacked = 0;
			}
			len += ulp_extra_len(skb);
			make_tx_data_wr(c3cn, skb, len, req_completion);
			c3cn->snd_nxt += len;
			skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
		}

		total_size += skb->truesize;
		set_arp_failure_handler(skb, arp_failure_discard);
		l2t_send(cdev, skb, c3cn->l2t);
	}
	return total_size;
}
677 | |||
678 | /* | ||
679 | * process_cpl_msg: -> host | ||
680 | * Top-level CPL message processing used by most CPL messages that | ||
681 | * pertain to connections. | ||
682 | */ | ||
/* Run a CPL handler @fn on (@c3cn, @skb) under the connection lock. */
static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
					      struct sk_buff *),
				   struct s3_conn *c3cn,
				   struct sk_buff *skb)
{
	spin_lock_bh(&c3cn->lock);
	fn(c3cn, skb);
	spin_unlock_bh(&c3cn->lock);
}
692 | |||
693 | /* | ||
694 | * process_cpl_msg_ref: -> host | ||
695 | * Similar to process_cpl_msg() but takes an extra connection reference around | ||
696 | * the call to the handler. Should be used if the handler may drop a | ||
697 | * connection reference. | ||
698 | */ | ||
/* As process_cpl_msg(), but holds an extra c3cn reference across the
 * handler -- use when @fn may drop the connection's last reference. */
static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
						   struct sk_buff *),
				       struct s3_conn *c3cn,
				       struct sk_buff *skb)
{
	c3cn_hold(c3cn);
	process_cpl_msg(fn, c3cn, skb);
	c3cn_put(c3cn);
}
708 | |||
709 | /* | ||
710 | * Process a CPL_ACT_ESTABLISH message: -> host | ||
711 | * Updates connection state from an active establish CPL message. Runs with | ||
712 | * the connection lock held. | ||
713 | */ | ||
714 | |||
/* Release an active-open tid and drop the reference it held on the c3cn. */
static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
{
	struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
	if (c3cn)
		c3cn_put(c3cn);
}
721 | |||
/*
 * Finish connection establishment: seed the send sequence numbers from
 * @snd_isn, account for Rx window that could not fit in opt0, confirm
 * the route, and move to ESTABLISHED.  Runs under the connection lock.
 */
static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
			     unsigned int opt)
{
	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;

	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
		c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);

	dst_confirm(c3cn->dst_cache);

	/* publish seq state before the state change becomes visible */
	smp_mb();

	c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
}
742 | |||
/*
 * Handle CPL_ACT_ESTABLISH under the connection lock: record the
 * receive ISN, complete establishment, then either start the pending
 * active close (if requested meanwhile) or kick the tx path and notify
 * the iscsi layer that the connection is writable.
 */
static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
		cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
				 c3cn->tid, c3cn->state);

	c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
	c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	__kfree_skb(skb);

	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(c3cn);
	else {
		if (skb_queue_len(&c3cn->write_queue))
			c3cn_push_tx_frames(c3cn, 1);
		cxgb3i_conn_tx_open(c3cn);
	}
}
769 | |||
/*
 * Top-half for CPL_ACT_ESTABLISH: swap the temporary atid for the real
 * tid (taking a tid reference on the c3cn), record the queue set the
 * hardware picked, then run process_act_establish() under the lock.
 */
static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct s3_conn *c3cn = ctx;
	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);

	c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
			tid, c3cn, c3cn->state, c3cn->flags);

	c3cn->tid = tid;
	c3cn_hold(c3cn);	/* ref owned by the tid table entry */
	cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
	s3_free_atid(cdev, atid);

	c3cn->qset = G_QNUM(ntohl(skb->csum));

	process_cpl_msg(process_act_establish, c3cn, skb);
	return 0;
}
792 | |||
793 | /* | ||
794 | * Process a CPL_ACT_OPEN_RPL message: -> host | ||
795 | * Handle active open failures. | ||
796 | */ | ||
/*
 * Map a hardware active-open failure status (CPL_ERR_*) to the errno
 * reported to the host; unknown statuses become -EIO.  A 4-tuple
 * collision is additionally logged.
 */
static int act_open_rpl_status_to_errno(int status)
{
	if (status == CPL_ERR_CONN_RESET)
		return -ECONNREFUSED;
	if (status == CPL_ERR_ARP_MISS)
		return -EHOSTUNREACH;
	if (status == CPL_ERR_CONN_TIMEDOUT)
		return -ETIMEDOUT;
	if (status == CPL_ERR_TCAM_FULL)
		return -ENOMEM;
	if (status == CPL_ERR_CONN_EXIST) {
		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
		return -EADDRINUSE;
	}
	return -EIO;
}
815 | |||
/*
 * Timer callback retrying a failed active open (armed on
 * CPL_ERR_CONN_EXIST): rebuild and resend the ACT_OPEN_REQ, or fail
 * the connection with -ENOMEM if the skb cannot be allocated.  Drops
 * the reference taken when the timer was armed.
 */
static void act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct s3_conn *c3cn = (struct s3_conn *)data;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	spin_lock_bh(&c3cn->lock);
	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
	if (!skb)
		fail_act_open(c3cn, -ENOMEM);
	else {
		skb->sk = (struct sock *)c3cn;
		set_arp_failure_handler(skb, act_open_req_arp_failure);
		make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
		l2t_send(c3cn->cdev, skb, c3cn->l2t);
	}
	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
836 | |||
/*
 * Handle CPL_ACT_OPEN_RPL under the lock: on a 4-tuple collision arm a
 * retry timer (first time only), otherwise fail the open with the
 * errno mapped from the hardware status.
 */
static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    c3cn->retry_timer.function != act_open_retry_timer) {
		c3cn->retry_timer.function = act_open_retry_timer;
		/* the armed timer owns a reference on the c3cn */
		if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
			c3cn_hold(c3cn);
	} else
		fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
	__kfree_skb(skb);
}
853 | |||
/*
 * Top-half for CPL_ACT_OPEN_RPL: queue the tid for release unless the
 * status is one that never allocated a tid (TCAM full, 4-tuple exists,
 * ARP miss), then run process_act_open_rpl() with an extra reference.
 */
static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
			rpl->status, c3cn, c3cn->state, c3cn->flags);

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(cdev, GET_TID(rpl));

	process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
	return 0;
}
870 | |||
/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	/* an abort is already in flight; its completion does the cleanup */
	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ESTABLISHED:
		/* peer closed first; wait for our side to close */
		c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
		break;
	case C3CN_STATE_ACTIVE_CLOSE:
		/* both FINs sent; still waiting for ACK of ours */
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
		/* our FIN was already ACKed; this FIN completes the close */
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

	/* let the upper layer know the connection is going away */
	cxgb3i_conn_closing(c3cn);
out:
	__kfree_skb(skb);
}
904 | |||
905 | static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
906 | { | ||
907 | struct s3_conn *c3cn = ctx; | ||
908 | |||
909 | c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n", | ||
910 | c3cn, c3cn->state, c3cn->flags); | ||
911 | process_cpl_msg_ref(process_peer_close, c3cn, skb); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->snd_una = ntohl(rpl->snd_nxt) - 1;	/* exclude FIN */

	/* an abort is already in flight; its completion does the cleanup */
	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ACTIVE_CLOSE:
		/* our FIN is ACKed; now wait for the peer's FIN */
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
		/* the peer's FIN already arrived; close is complete */
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

out:
	kfree_skb(skb);
}
949 | |||
950 | static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, | ||
951 | void *ctx) | ||
952 | { | ||
953 | struct s3_conn *c3cn = ctx; | ||
954 | |||
955 | c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n", | ||
956 | c3cn, c3cn->state, c3cn->flags); | ||
957 | |||
958 | process_cpl_msg_ref(process_close_con_rpl, c3cn, skb); | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | /* | ||
963 | * Process ABORT_REQ_RSS CPL message: -> host | ||
964 | * Process abort requests. If we are waiting for an ABORT_RPL we ignore this | ||
965 | * request except that we need to reply to it. | ||
966 | */ | ||
967 | |||
968 | static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason, | ||
969 | int *need_rst) | ||
970 | { | ||
971 | switch (abort_reason) { | ||
972 | case CPL_ERR_BAD_SYN: /* fall through */ | ||
973 | case CPL_ERR_CONN_RESET: | ||
974 | return c3cn->state > C3CN_STATE_ESTABLISHED ? | ||
975 | -EPIPE : -ECONNRESET; | ||
976 | case CPL_ERR_XMIT_TIMEDOUT: | ||
977 | case CPL_ERR_PERSIST_TIMEDOUT: | ||
978 | case CPL_ERR_FINWAIT2_TIMEDOUT: | ||
979 | case CPL_ERR_KEEPALIVE_TIMEDOUT: | ||
980 | return -ETIMEDOUT; | ||
981 | default: | ||
982 | return -EIO; | ||
983 | } | ||
984 | } | ||
985 | |||
/*
 * Process a peer ABORT_REQ, deferred from do_abort_req().
 * The request appears to be delivered twice: the first copy only marks the
 * connection as aborting, the second triggers the reply and teardown.
 */
static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
{
	int rst_status = CPL_ABORT_NO_RST;
	const struct cpl_abort_req_rss *req = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
		/* first copy: just record it and wait for the second */
		c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
		c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
		__kfree_skb(skb);
		return;
	}

	c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
	/* NOTE(review): rst_status is consumed here before
	 * abort_status_to_errno() below could update it via need_rst —
	 * confirm this ordering is intended. */
	send_abort_rpl(c3cn, rst_status);

	/* if we did not initiate an abort ourselves, close the connection */
	if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		c3cn->err =
			abort_status_to_errno(c3cn, req->status, &rst_status);
		c3cn_closed(c3cn);
	}
}
1010 | |||
1011 | static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
1012 | { | ||
1013 | const struct cpl_abort_req_rss *req = cplhdr(skb); | ||
1014 | struct s3_conn *c3cn = ctx; | ||
1015 | |||
1016 | c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n", | ||
1017 | c3cn, c3cn->state, c3cn->flags); | ||
1018 | |||
1019 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || | ||
1020 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) { | ||
1021 | __kfree_skb(skb); | ||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | process_cpl_msg_ref(process_abort_req, c3cn, skb); | ||
1026 | return 0; | ||
1027 | } | ||
1028 | |||
/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		/* two replies are expected; only the second one completes
		 * the abort and closes the connection */
		if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
			c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
		else {
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
			if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
				cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
						 c3cn->cdev->name, c3cn->tid);
			c3cn_closed(c3cn);
		}
	}
	__kfree_skb(skb);
}
1055 | |||
/*
 * t3cdev CPL handler for ABORT_RPL_RSS; filters out replies that need no
 * processing before deferring to process_abort_rpl().
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
			rpl->status, c3cn, c3cn ? c3cn->state : 0,
			c3cn ? c3cn->flags : 0UL);

	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late.  These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto discard;

	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (!c3cn)
		goto discard;

	process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
	return 0;

discard:
	__kfree_skb(skb);
	return 0;
}
1091 | |||
/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
 * follow after the bhs.
 */
static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	/* connection already closing: abort it (unless an abort is in
	 * progress) and drop the pdu */
	if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
		if (c3cn->state != C3CN_STATE_ABORTING)
			send_abort_req(c3cn);
		__kfree_skb(skb);
		return;
	}

	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	skb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
				 c3cn->cdev->name, c3cn->tid,
				 skb->len, hdr_len);
		goto abort_conn;
	}

	/* the rx_data_ddp status CPL is appended at the tail of the skb */
	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0)
		goto abort_conn;

	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
		      skb, skb->len, skb_rx_pdulen(skb), status);

	/* propagate header/data digest and pad errors via the ulp flags */
	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;

	/* non-DDP case: coalesced payload follows with its own cpl header;
	 * otherwise the payload was placed directly (DDP'ed) */
	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0)
			goto abort_conn;
		data_len = ntohs(data_cpl.len);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;

	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
	/* trim to the pdu and hand it to the iscsi layer */
	__pskb_trim(skb, len);
	__skb_queue_tail(&c3cn->receive_queue, skb);
	cxgb3i_conn_pdu_ready(c3cn);

	return;

abort_conn:
	send_abort_req(c3cn);
	__kfree_skb(skb);
}
1168 | |||
1169 | static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) | ||
1170 | { | ||
1171 | struct s3_conn *c3cn = ctx; | ||
1172 | |||
1173 | process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb); | ||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1177 | /* | ||
1178 | * Process TX_DATA_ACK CPL messages: -> host | ||
1179 | * Process an acknowledgment of WR completion. Advance snd_una and send the | ||
1180 | * next batch of work requests from the write queue. | ||
1181 | */ | ||
1182 | static void check_wr_invariants(struct s3_conn *c3cn) | ||
1183 | { | ||
1184 | int pending = count_pending_wrs(c3cn); | ||
1185 | |||
1186 | if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max)) | ||
1187 | cxgb3i_log_error("TID %u: credit imbalance: avail %u, " | ||
1188 | "pending %u, total should be %u\n", | ||
1189 | c3cn->tid, c3cn->wr_avail, pending, | ||
1190 | c3cn->wr_max); | ||
1191 | } | ||
1192 | |||
/*
 * Return WR credits acked by the hardware, advance snd_una, and restart
 * transmission from the write queue if there is now room.
 */
static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	u32 snd_una = ntohl(hdr->snd_una);

	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
			credits, c3cn->wr_avail, c3cn->wr_unacked,
			c3cn->tid, c3cn->state);

	c3cn->wr_avail += credits;
	/* clamp unacked so avail + unacked never exceeds the max */
	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;

	/* retire completed WRs; each queued skb's csum field holds the
	 * number of credits that WR consumed */
	while (credits) {
		struct sk_buff *p = peek_wr(c3cn);

		if (unlikely(!p)) {
			cxgb3i_log_error("%u WR_ACK credits for TID %u with "
					 "nothing pending, state %u\n",
					 credits, c3cn->tid, c3cn->state);
			break;
		}
		if (unlikely(credits < p->csum)) {
			/* partial ack of the head WR: remember the balance */
			struct tx_data_wr *w = cplhdr(p);
			cxgb3i_log_error("TID %u got %u WR credits need %u, "
					 "len %u, main body %u, frags %u, "
					 "seq # %u, ACK una %u, ACK nxt %u, "
					 "WR_AVAIL %u, WRs pending %u\n",
					 c3cn->tid, credits, p->csum, p->len,
					 p->len - p->data_len,
					 skb_shinfo(p)->nr_frags,
					 ntohl(w->sndseq), snd_una,
					 ntohl(hdr->snd_nxt), c3cn->wr_avail,
					 count_pending_wrs(c3cn) - credits);
			p->csum -= credits;
			break;
		} else {
			dequeue_wr(c3cn);
			credits -= p->csum;
			free_wr_skb(p);
		}
	}

	check_wr_invariants(c3cn);

	/* stale/reordered ack: nothing further to do */
	if (unlikely(before(snd_una, c3cn->snd_una))) {
		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
				 "snd_una %u\n",
				 c3cn->tid, snd_una, c3cn->snd_una);
		goto out_free;
	}

	if (c3cn->snd_una != snd_una) {
		c3cn->snd_una = snd_una;
		/* forward progress: confirm the neighbour entry */
		dst_confirm(c3cn->dst_cache);
	}

	/* push any queued data and tell the upper layer tx has opened up */
	if (skb_queue_len(&c3cn->write_queue)) {
		if (c3cn_push_tx_frames(c3cn, 0))
			cxgb3i_conn_tx_open(c3cn);
	} else
		cxgb3i_conn_tx_open(c3cn);
out_free:
	__kfree_skb(skb);
}
1259 | |||
1260 | static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
1261 | { | ||
1262 | struct s3_conn *c3cn = ctx; | ||
1263 | |||
1264 | process_cpl_msg(process_wr_ack, c3cn, skb); | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | /* | ||
1269 | * for each connection, pre-allocate skbs needed for close/abort requests. So | ||
1270 | * that we can service the request right away. | ||
1271 | */ | ||
1272 | static void c3cn_free_cpl_skbs(struct s3_conn *c3cn) | ||
1273 | { | ||
1274 | if (c3cn->cpl_close) | ||
1275 | kfree_skb(c3cn->cpl_close); | ||
1276 | if (c3cn->cpl_abort_req) | ||
1277 | kfree_skb(c3cn->cpl_abort_req); | ||
1278 | if (c3cn->cpl_abort_rpl) | ||
1279 | kfree_skb(c3cn->cpl_abort_rpl); | ||
1280 | } | ||
1281 | |||
1282 | static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn) | ||
1283 | { | ||
1284 | c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req), | ||
1285 | GFP_KERNEL); | ||
1286 | if (!c3cn->cpl_close) | ||
1287 | return -ENOMEM; | ||
1288 | skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req)); | ||
1289 | |||
1290 | c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req), | ||
1291 | GFP_KERNEL); | ||
1292 | if (!c3cn->cpl_abort_req) | ||
1293 | goto free_cpl_skbs; | ||
1294 | skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req)); | ||
1295 | |||
1296 | c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl), | ||
1297 | GFP_KERNEL); | ||
1298 | if (!c3cn->cpl_abort_rpl) | ||
1299 | goto free_cpl_skbs; | ||
1300 | skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl)); | ||
1301 | |||
1302 | return 0; | ||
1303 | |||
1304 | free_cpl_skbs: | ||
1305 | c3cn_free_cpl_skbs(c3cn); | ||
1306 | return -ENOMEM; | ||
1307 | } | ||
1308 | |||
/**
 * c3cn_release_offload_resources - release offload resource
 * @c3cn: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void c3cn_release_offload_resources(struct s3_conn *c3cn)
{
	struct t3cdev *cdev = c3cn->cdev;
	unsigned int tid = c3cn->tid;

	c3cn->qset = 0;
	c3cn_free_cpl_skbs(c3cn);

	/* if WR credits are outstanding, drop the still-queued WRs */
	if (c3cn->wr_avail != c3cn->wr_max) {
		purge_wr_queue(c3cn);
		reset_wr_list(c3cn);
	}

	if (cdev) {
		if (c3cn->l2t) {
			l2t_release(L2DATA(cdev), c3cn->l2t);
			c3cn->l2t = NULL;
		}
		if (c3cn->state == C3CN_STATE_CONNECTING)
			/* we have ATID */
			s3_free_atid(cdev, tid);
		else {
			/* we have TID */
			cxgb3_remove_tid(cdev, (void *)c3cn, tid);
			/* presumably balances a hold taken when the TID was
			 * installed — verify against the connect path */
			c3cn_put(c3cn);
		}
	}

	c3cn->dst_cache = NULL;
	c3cn->cdev = NULL;
}
1345 | |||
1346 | /** | ||
1347 | * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure | ||
1348 | * returns the s3_conn structure allocated. | ||
1349 | */ | ||
1350 | struct s3_conn *cxgb3i_c3cn_create(void) | ||
1351 | { | ||
1352 | struct s3_conn *c3cn; | ||
1353 | |||
1354 | c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL); | ||
1355 | if (!c3cn) | ||
1356 | return NULL; | ||
1357 | |||
1358 | /* pre-allocate close/abort cpl, so we don't need to wait for memory | ||
1359 | when close/abort is requested. */ | ||
1360 | if (c3cn_alloc_cpl_skbs(c3cn) < 0) | ||
1361 | goto free_c3cn; | ||
1362 | |||
1363 | c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn); | ||
1364 | |||
1365 | c3cn->flags = 0; | ||
1366 | spin_lock_init(&c3cn->lock); | ||
1367 | atomic_set(&c3cn->refcnt, 1); | ||
1368 | skb_queue_head_init(&c3cn->receive_queue); | ||
1369 | skb_queue_head_init(&c3cn->write_queue); | ||
1370 | setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn); | ||
1371 | rwlock_init(&c3cn->callback_lock); | ||
1372 | |||
1373 | return c3cn; | ||
1374 | |||
1375 | free_c3cn: | ||
1376 | kfree(c3cn); | ||
1377 | return NULL; | ||
1378 | } | ||
1379 | |||
/*
 * c3cn_active_close - initiate a host-side close
 * @c3cn: the offloaded iscsi tcp connection
 *
 * Purges unread rx data and, depending on the state, sends a graceful
 * close, sends an abort (if unread data was tossed), or defers the close
 * until the pending active open completes.
 */
static void c3cn_active_close(struct s3_conn *c3cn)
{
	int data_lost;
	int close_req = 0;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			 c3cn, c3cn->state, c3cn->flags);

	dst_confirm(c3cn->dst_cache);

	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);

	/* any unread data will be lost by the close */
	data_lost = skb_queue_len(&c3cn->receive_queue);
	__skb_queue_purge(&c3cn->receive_queue);

	switch (c3cn->state) {
	case C3CN_STATE_CLOSED:
	case C3CN_STATE_ACTIVE_CLOSE:
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
	case C3CN_STATE_ABORTING:
		/* nothing need to be done */
		break;
	case C3CN_STATE_CONNECTING:
		/* defer until cpl_act_open_rpl or cpl_act_establish */
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
		break;
	case C3CN_STATE_ESTABLISHED:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
		break;
	case C3CN_STATE_PASSIVE_CLOSE:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	}

	if (close_req) {
		if (data_lost)
			/* Unread data was tossed, zap the connection. */
			send_abort_req(c3cn);
		else
			send_close_req(c3cn);
	}

	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
1429 | |||
1430 | /** | ||
1431 | * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any | ||
1432 | * resource held | ||
1433 | * @c3cn: the iscsi tcp connection | ||
1434 | */ | ||
1435 | void cxgb3i_c3cn_release(struct s3_conn *c3cn) | ||
1436 | { | ||
1437 | c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n", | ||
1438 | c3cn, c3cn->state, c3cn->flags); | ||
1439 | if (unlikely(c3cn->state == C3CN_STATE_CONNECTING)) | ||
1440 | c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED); | ||
1441 | else if (likely(c3cn->state != C3CN_STATE_CLOSED)) | ||
1442 | c3cn_active_close(c3cn); | ||
1443 | c3cn_put(c3cn); | ||
1444 | } | ||
1445 | |||
1446 | static int is_cxgb3_dev(struct net_device *dev) | ||
1447 | { | ||
1448 | struct cxgb3i_sdev_data *cdata; | ||
1449 | struct net_device *ndev = dev; | ||
1450 | |||
1451 | if (dev->priv_flags & IFF_802_1Q_VLAN) | ||
1452 | ndev = vlan_dev_real_dev(dev); | ||
1453 | |||
1454 | write_lock(&cdata_rwlock); | ||
1455 | list_for_each_entry(cdata, &cdata_list, list) { | ||
1456 | struct adap_ports *ports = &cdata->ports; | ||
1457 | int i; | ||
1458 | |||
1459 | for (i = 0; i < ports->nports; i++) | ||
1460 | if (ndev == ports->lldevs[i]) { | ||
1461 | write_unlock(&cdata_rwlock); | ||
1462 | return 1; | ||
1463 | } | ||
1464 | } | ||
1465 | write_unlock(&cdata_rwlock); | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | /** | ||
1470 | * cxgb3_egress_dev - return the cxgb3 egress device | ||
1471 | * @root_dev: the root device anchoring the search | ||
1472 | * @c3cn: the connection used to determine egress port in bonding mode | ||
1473 | * @context: in bonding mode, indicates a connection set up or failover | ||
1474 | * | ||
1475 | * Return egress device or NULL if the egress device isn't one of our ports. | ||
1476 | */ | ||
1477 | static struct net_device *cxgb3_egress_dev(struct net_device *root_dev, | ||
1478 | struct s3_conn *c3cn, | ||
1479 | int context) | ||
1480 | { | ||
1481 | while (root_dev) { | ||
1482 | if (root_dev->priv_flags & IFF_802_1Q_VLAN) | ||
1483 | root_dev = vlan_dev_real_dev(root_dev); | ||
1484 | else if (is_cxgb3_dev(root_dev)) | ||
1485 | return root_dev; | ||
1486 | else | ||
1487 | return NULL; | ||
1488 | } | ||
1489 | return NULL; | ||
1490 | } | ||
1491 | |||
1492 | static struct rtable *find_route(struct net_device *dev, | ||
1493 | __be32 saddr, __be32 daddr, | ||
1494 | __be16 sport, __be16 dport) | ||
1495 | { | ||
1496 | struct rtable *rt; | ||
1497 | struct flowi fl = { | ||
1498 | .oif = dev ? dev->ifindex : 0, | ||
1499 | .nl_u = { | ||
1500 | .ip4_u = { | ||
1501 | .daddr = daddr, | ||
1502 | .saddr = saddr, | ||
1503 | .tos = 0 } }, | ||
1504 | .proto = IPPROTO_TCP, | ||
1505 | .uli_u = { | ||
1506 | .ports = { | ||
1507 | .sport = sport, | ||
1508 | .dport = dport } } }; | ||
1509 | |||
1510 | if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) | ||
1511 | return NULL; | ||
1512 | return rt; | ||
1513 | } | ||
1514 | |||
1515 | /* | ||
1516 | * Assign offload parameters to some connection fields. | ||
1517 | */ | ||
1518 | static void init_offload_conn(struct s3_conn *c3cn, | ||
1519 | struct t3cdev *cdev, | ||
1520 | struct dst_entry *dst) | ||
1521 | { | ||
1522 | BUG_ON(c3cn->cdev != cdev); | ||
1523 | c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1; | ||
1524 | c3cn->wr_unacked = 0; | ||
1525 | c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst)); | ||
1526 | |||
1527 | reset_wr_list(c3cn); | ||
1528 | } | ||
1529 | |||
/*
 * initiate_act_open - send the active open request for a connection
 * @c3cn: the offloaded iscsi tcp connection
 * @dev: the cxgb3 egress net device
 *
 * Allocates an ATID and L2T entry, initializes the offload parameters and
 * sends CPL_ACT_OPEN_REQ.  Returns 0 if the request was sent, -EINVAL on
 * any allocation failure (resources acquired so far are released).
 */
static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
{
	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
	struct t3cdev *cdev = cdata->cdev;
	struct dst_entry *dst = c3cn->dst_cache;
	struct sk_buff *skb;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	/*
	 * Initialize connection data.  Note that the flags and ULP mode are
	 * initialized higher up ...
	 */
	c3cn->dev = dev;
	c3cn->cdev = cdev;
	c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
	/* NOTE(review): if the tid field is unsigned this `< 0` test can
	 * never fire — confirm the type of c3cn->tid */
	if (c3cn->tid < 0)
		goto out_err;

	c3cn->qset = 0;
	c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
	if (!c3cn->l2t)
		goto free_tid;

	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
	if (!skb)
		goto free_l2t;

	/* stash the connection so the ARP failure handler can recover it */
	skb->sk = (struct sock *)c3cn;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	c3cn_hold(c3cn);

	init_offload_conn(c3cn, cdev, dst);
	c3cn->err = 0;

	make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
	l2t_send(cdev, skb, c3cn->l2t);
	return 0;

free_l2t:
	l2t_release(L2DATA(cdev), c3cn->l2t);
free_tid:
	s3_free_atid(cdev, c3cn->tid);
	c3cn->tid = 0;
out_err:
	return -EINVAL;
}
1578 | |||
1579 | /** | ||
1580 | * cxgb3i_find_dev - find the interface associated with the given address | ||
1581 | * @ipaddr: ip address | ||
1582 | */ | ||
1583 | static struct net_device * | ||
1584 | cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) | ||
1585 | { | ||
1586 | struct flowi fl; | ||
1587 | int err; | ||
1588 | struct rtable *rt; | ||
1589 | |||
1590 | memset(&fl, 0, sizeof(fl)); | ||
1591 | fl.nl_u.ip4_u.daddr = ipaddr; | ||
1592 | |||
1593 | err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl); | ||
1594 | if (!err) | ||
1595 | return (&rt->dst)->dev; | ||
1596 | |||
1597 | return NULL; | ||
1598 | } | ||
1599 | |||
/**
 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
 * @dev: optional egress net device to bind the connection to
 * @c3cn: the iscsi tcp connection
 * @usin: destination address
 *
 * return 0 if active open request is sent, < 0 otherwise.
 */
int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
			struct sockaddr_in *usin)
{
	struct rtable *rt;
	struct cxgb3i_sdev_data *cdata;
	struct t3cdev *cdev;
	__be32 sipv4;
	struct net_device *dstdev;
	int err;

	c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);

	/* only IPv4 is supported */
	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	c3cn->daddr.sin_port = usin->sin_port;
	c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;

	/* the destination must be reachable through a cxgb3 port */
	dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
	if (!dstdev || !is_cxgb3_dev(dstdev))
		return -ENETUNREACH;

	if (dstdev->priv_flags & IFF_802_1Q_VLAN)
		dev = dstdev;

	rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
			c3cn->daddr.sin_addr.s_addr,
			c3cn->saddr.sin_port,
			c3cn->daddr.sin_port);
	if (rt == NULL) {
		c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		return -ENETUNREACH;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	/* use the route's source address if none was given */
	if (!c3cn->saddr.sin_addr.s_addr)
		c3cn->saddr.sin_addr.s_addr = rt->rt_src;

	/* now commit destination to connection */
	c3cn->dst_cache = &rt->dst;
	/* NOTE(review): error returns below (egress dev / get_port failures)
	 * do not release rt — confirm the connection release path accounts
	 * for the dst_cache reference */

	/* try to establish an offloaded connection */
	dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
	if (dev == NULL) {
		c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
		return -ENETUNREACH;
	}
	cdata = NDEV2CDATA(dev);
	cdev = cdata->cdev;

	/* get a source port if one hasn't been provided */
	err = c3cn_get_port(c3cn, cdata);
	if (err)
		return err;

	c3cn_conn_debug("c3cn 0x%p get port %u.\n",
			c3cn, ntohs(c3cn->saddr.sin_port));

	/* use the port's private iscsi address, configuring it if unset */
	sipv4 = cxgb3i_get_private_ipv4addr(dev);
	if (!sipv4) {
		c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
		sipv4 = c3cn->saddr.sin_addr.s_addr;
		cxgb3i_set_private_ipv4addr(dev, sipv4);
	} else
		c3cn->saddr.sin_addr.s_addr = sipv4;

	c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
			c3cn,
			&c3cn->saddr.sin_addr.s_addr,
			ntohs(c3cn->saddr.sin_port),
			&c3cn->daddr.sin_addr.s_addr,
			ntohs(c3cn->daddr.sin_port));

	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
	if (!initiate_act_open(c3cn, dev))
		return 0;

	/*
	 * If we get here, we don't have an offload connection so simply
	 * return a failure.
	 */
	err = -ENOTSUPP;

	/*
	 * This trashes the connection and releases the local port,
	 * if necessary.
	 */
	c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	ip_rt_put(rt);
	c3cn_put_port(c3cn);
	return err;
}
1711 | |||
/**
 * cxgb3i_c3cn_rx_credits - ack received tcp data.
 * @c3cn: iscsi tcp connection
 * @copied: # of bytes processed
 *
 * Called after some received data has been read.  It returns RX credits
 * to the HW for the amount of data processed.
 */
void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
{
	struct t3cdev *cdev;
	int must_send;
	u32 credits, dack = 0;

	if (c3cn->state != C3CN_STATE_ESTABLISHED)
		return;

	/* credits = data consumed since the last window update */
	credits = c3cn->copied_seq - c3cn->rcv_wup;
	if (unlikely(!credits))
		return;

	cdev = c3cn->cdev;

	/* a threshold of 0 disables returning rx credits */
	if (unlikely(cxgb3_rx_credit_thres == 0))
		return;

	dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	/*
	 * For coalescing to work effectively ensure the receive window has
	 * at least 16KB left.
	 */
	must_send = credits + 16384 >= cxgb3_rcv_win;

	if (must_send || credits >= cxgb3_rx_credit_thres)
		c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
}
1749 | |||
/**
 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
 * @c3cn: iscsi tcp connection
 * @skb: skb contains the iscsi pdu
 *
 * Add a list of skbs to a connection send queue. The skbs must comply with
 * the max size limit of the device and have a headroom of at least
 * TX_HEADER_LEN bytes.
 * Return # of bytes queued.
 */
int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&c3cn->lock);

	/* only an established connection may queue new tx data */
	if (c3cn->state != C3CN_STATE_ESTABLISHED) {
		c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
			      c3cn, c3cn->state);
		err = -EAGAIN;
		goto out_err;
	}

	if (c3cn->err) {
		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
		err = -EPIPE;
		goto out_err;
	}

	/* back off while unacked tx data would exceed the send window */
	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
			      c3cn, c3cn->write_seq, c3cn->snd_una,
			      cxgb3_snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		/* count linear data (if any) as one extra gather entry */
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		/* headroom for the TX_DATA_WR header is mandatory */
		if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
			c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
			err = -EINVAL;
			goto out_err;
		}

		/* a single WR can carry at most SKB_WR_LIST_SIZE entries */
		if (frags >= SKB_WR_LIST_SIZE) {
			cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
					 c3cn, skb_shinfo(skb)->nr_frags,
					 skb->len, skb->data_len);
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
		copied += skb->len;
		c3cn->write_seq += skb->len + ulp_extra_len(skb);
		skb = next;
	}
done:
	/* push whatever is queued, including skbs entailed before an error */
	if (likely(skb_queue_len(&c3cn->write_queue)))
		c3cn_push_tx_frames(c3cn, 1);
	spin_unlock_bh(&c3cn->lock);
	return copied;

out_err:
	/* -EPIPE with nothing queued reports the connection's own error */
	if (copied == 0 && err == -EPIPE)
		copied = c3cn->err ? c3cn->err : -EPIPE;
	else
		copied = err;
	goto done;
}
1826 | |||
1827 | static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata) | ||
1828 | { | ||
1829 | struct adap_ports *ports = &cdata->ports; | ||
1830 | struct s3_conn *c3cn; | ||
1831 | int i; | ||
1832 | |||
1833 | for (i = 0; i < cxgb3_max_connect; i++) { | ||
1834 | if (cdata->sport_conn[i]) { | ||
1835 | c3cn = cdata->sport_conn[i]; | ||
1836 | cdata->sport_conn[i] = NULL; | ||
1837 | |||
1838 | spin_lock_bh(&c3cn->lock); | ||
1839 | c3cn->cdev = NULL; | ||
1840 | c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN); | ||
1841 | c3cn_closed(c3cn); | ||
1842 | spin_unlock_bh(&c3cn->lock); | ||
1843 | } | ||
1844 | } | ||
1845 | |||
1846 | for (i = 0; i < ports->nports; i++) | ||
1847 | NDEV2CDATA(ports->lldevs[i]) = NULL; | ||
1848 | |||
1849 | cxgb3i_free_big_mem(cdata); | ||
1850 | } | ||
1851 | |||
1852 | void cxgb3i_sdev_cleanup(void) | ||
1853 | { | ||
1854 | struct cxgb3i_sdev_data *cdata; | ||
1855 | |||
1856 | write_lock(&cdata_rwlock); | ||
1857 | list_for_each_entry(cdata, &cdata_list, list) { | ||
1858 | list_del(&cdata->list); | ||
1859 | sdev_data_cleanup(cdata); | ||
1860 | } | ||
1861 | write_unlock(&cdata_rwlock); | ||
1862 | } | ||
1863 | |||
1864 | int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers) | ||
1865 | { | ||
1866 | cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish; | ||
1867 | cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl; | ||
1868 | cpl_handlers[CPL_PEER_CLOSE] = do_peer_close; | ||
1869 | cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req; | ||
1870 | cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl; | ||
1871 | cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl; | ||
1872 | cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack; | ||
1873 | cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr; | ||
1874 | |||
1875 | if (cxgb3_max_connect > CXGB3I_MAX_CONN) | ||
1876 | cxgb3_max_connect = CXGB3I_MAX_CONN; | ||
1877 | return 0; | ||
1878 | } | ||
1879 | |||
1880 | /** | ||
1881 | * cxgb3i_sdev_add - allocate and initialize resources for each adapter found | ||
1882 | * @cdev: t3cdev adapter | ||
1883 | * @client: cxgb3 driver client | ||
1884 | */ | ||
1885 | void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client) | ||
1886 | { | ||
1887 | struct cxgb3i_sdev_data *cdata; | ||
1888 | struct ofld_page_info rx_page_info; | ||
1889 | unsigned int wr_len; | ||
1890 | int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *); | ||
1891 | int i; | ||
1892 | |||
1893 | cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL); | ||
1894 | if (!cdata) { | ||
1895 | cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n", | ||
1896 | cdev, mapsize); | ||
1897 | return; | ||
1898 | } | ||
1899 | |||
1900 | if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 || | ||
1901 | cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 || | ||
1902 | cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { | ||
1903 | cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n", | ||
1904 | cdev); | ||
1905 | goto free_cdata; | ||
1906 | } | ||
1907 | |||
1908 | s3_init_wr_tab(wr_len); | ||
1909 | |||
1910 | spin_lock_init(&cdata->lock); | ||
1911 | INIT_LIST_HEAD(&cdata->list); | ||
1912 | cdata->cdev = cdev; | ||
1913 | cdata->client = client; | ||
1914 | |||
1915 | for (i = 0; i < cdata->ports.nports; i++) | ||
1916 | NDEV2CDATA(cdata->ports.lldevs[i]) = cdata; | ||
1917 | |||
1918 | write_lock(&cdata_rwlock); | ||
1919 | list_add_tail(&cdata->list, &cdata_list); | ||
1920 | write_unlock(&cdata_rwlock); | ||
1921 | |||
1922 | cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev); | ||
1923 | return; | ||
1924 | |||
1925 | free_cdata: | ||
1926 | cxgb3i_free_big_mem(cdata); | ||
1927 | } | ||
1928 | |||
1929 | /** | ||
1930 | * cxgb3i_sdev_remove - free the allocated resources for the adapter | ||
1931 | * @cdev: t3cdev adapter | ||
1932 | */ | ||
1933 | void cxgb3i_sdev_remove(struct t3cdev *cdev) | ||
1934 | { | ||
1935 | struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev); | ||
1936 | |||
1937 | cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev); | ||
1938 | |||
1939 | write_lock(&cdata_rwlock); | ||
1940 | list_del(&cdata->list); | ||
1941 | write_unlock(&cdata_rwlock); | ||
1942 | |||
1943 | sdev_data_cleanup(cdata); | ||
1944 | } | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h deleted file mode 100644 index 6a1d86b1fafe..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.h +++ /dev/null | |||
@@ -1,243 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management | ||
3 | * | ||
4 | * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
7 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
8 | * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this | ||
9 | * release for licensing terms and conditions. | ||
10 | * | ||
11 | * Written by: Dimitris Michailidis (dm@chelsio.com) | ||
12 | * Karen Xie (kxie@chelsio.com) | ||
13 | */ | ||
14 | |||
15 | #ifndef _CXGB3I_OFFLOAD_H | ||
16 | #define _CXGB3I_OFFLOAD_H | ||
17 | |||
18 | #include <linux/skbuff.h> | ||
19 | #include <linux/in.h> | ||
20 | |||
21 | #include "common.h" | ||
22 | #include "adapter.h" | ||
23 | #include "t3cdev.h" | ||
24 | #include "cxgb3_offload.h" | ||
25 | |||
26 | #define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt) | ||
27 | #define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt) | ||
28 | #define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt) | ||
29 | #define cxgb3i_log_debug(fmt, args...) \ | ||
30 | printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args) | ||
31 | |||
32 | /** | ||
33 | * struct s3_conn - an iscsi tcp connection structure | ||
34 | * | ||
35 | * @dev: net device of with connection | ||
36 | * @cdev: adapter t3cdev for net device | ||
37 | * @flags: see c3cn_flags below | ||
38 | * @tid: connection id assigned by the h/w | ||
39 | * @qset: queue set used by connection | ||
40 | * @mss_idx: Maximum Segment Size table index | ||
41 | * @l2t: ARP resolution entry for offload packets | ||
42 | * @wr_max: maximum in-flight writes | ||
43 | * @wr_avail: number of writes available | ||
44 | * @wr_unacked: writes since last request for completion notification | ||
45 | * @wr_pending_head: head of pending write queue | ||
46 | * @wr_pending_tail: tail of pending write queue | ||
47 | * @cpl_close: skb for cpl_close_req | ||
48 | * @cpl_abort_req: skb for cpl_abort_req | ||
49 | * @cpl_abort_rpl: skb for cpl_abort_rpl | ||
50 | * @lock: connection status lock | ||
51 | * @refcnt: reference count on connection | ||
52 | * @state: connection state | ||
53 | * @saddr: source ip/port address | ||
54 | * @daddr: destination ip/port address | ||
55 | * @dst_cache: reference to destination route | ||
56 | * @receive_queue: received PDUs | ||
57 | * @write_queue: un-pushed pending writes | ||
58 | * @retry_timer: retry timer for various operations | ||
59 | * @err: connection error status | ||
60 | * @callback_lock: lock for opaque user context | ||
61 | * @user_data: opaque user context | ||
62 | * @rcv_nxt: next receive seq. # | ||
63 | * @copied_seq: head of yet unread data | ||
64 | * @rcv_wup: rcv_nxt on last window update sent | ||
65 | * @snd_nxt: next sequence we send | ||
66 | * @snd_una: first byte we want an ack for | ||
67 | * @write_seq: tail+1 of data held in send buffer | ||
68 | */ | ||
69 | struct s3_conn { | ||
70 | struct net_device *dev; | ||
71 | struct t3cdev *cdev; | ||
72 | unsigned long flags; | ||
73 | int tid; | ||
74 | int qset; | ||
75 | int mss_idx; | ||
76 | struct l2t_entry *l2t; | ||
77 | int wr_max; | ||
78 | int wr_avail; | ||
79 | int wr_unacked; | ||
80 | struct sk_buff *wr_pending_head; | ||
81 | struct sk_buff *wr_pending_tail; | ||
82 | struct sk_buff *cpl_close; | ||
83 | struct sk_buff *cpl_abort_req; | ||
84 | struct sk_buff *cpl_abort_rpl; | ||
85 | spinlock_t lock; | ||
86 | atomic_t refcnt; | ||
87 | volatile unsigned int state; | ||
88 | struct sockaddr_in saddr; | ||
89 | struct sockaddr_in daddr; | ||
90 | struct dst_entry *dst_cache; | ||
91 | struct sk_buff_head receive_queue; | ||
92 | struct sk_buff_head write_queue; | ||
93 | struct timer_list retry_timer; | ||
94 | int err; | ||
95 | rwlock_t callback_lock; | ||
96 | void *user_data; | ||
97 | |||
98 | u32 rcv_nxt; | ||
99 | u32 copied_seq; | ||
100 | u32 rcv_wup; | ||
101 | u32 snd_nxt; | ||
102 | u32 snd_una; | ||
103 | u32 write_seq; | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * connection state | ||
108 | */ | ||
109 | enum conn_states { | ||
110 | C3CN_STATE_CONNECTING = 1, | ||
111 | C3CN_STATE_ESTABLISHED, | ||
112 | C3CN_STATE_ACTIVE_CLOSE, | ||
113 | C3CN_STATE_PASSIVE_CLOSE, | ||
114 | C3CN_STATE_CLOSE_WAIT_1, | ||
115 | C3CN_STATE_CLOSE_WAIT_2, | ||
116 | C3CN_STATE_ABORTING, | ||
117 | C3CN_STATE_CLOSED, | ||
118 | }; | ||
119 | |||
/* true once the connection has entered any close/abort/closed state
 * (all states from C3CN_STATE_ACTIVE_CLOSE onward in enum conn_states) */
static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
{
	return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
}
/* true while the connection is in the established (data-transfer) state */
static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
{
	return c3cn->state == C3CN_STATE_ESTABLISHED;
}
128 | |||
129 | /* | ||
130 | * Connection flags -- many to track some close related events. | ||
131 | */ | ||
132 | enum c3cn_flags { | ||
133 | C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */ | ||
134 | C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */ | ||
135 | C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */ | ||
136 | C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */ | ||
137 | C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */ | ||
138 | C3CN_OFFLOAD_DOWN /* offload function off */ | ||
139 | }; | ||
140 | |||
/**
 * cxgb3i_sdev_data - Per adapter data.
 * Linked off of each Ethernet device port on the adapter.
 * Also available via the t3cdev structure since we have pointers to our port
 * net_device's there ...
 *
 * @list: list head to link elements
 * @cdev: t3cdev adapter
 * @client: CPL client pointer
 * @ports: array of adapter ports
 * @lock: NOTE(review): appears to guard @sport_next/@sport_conn — confirm
 * @sport_next: next port
 * @sport_conn: source port connection
 */
struct cxgb3i_sdev_data {
	struct list_head list;
	struct t3cdev *cdev;
	struct cxgb3_client *client;
	struct adap_ports ports;
	spinlock_t lock;
	unsigned int sport_next;
	struct s3_conn *sport_conn[0];
};
163 | #define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr) | ||
164 | #define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev) | ||
165 | |||
166 | void cxgb3i_sdev_cleanup(void); | ||
167 | int cxgb3i_sdev_init(cxgb3_cpl_handler_func *); | ||
168 | void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *); | ||
169 | void cxgb3i_sdev_remove(struct t3cdev *); | ||
170 | |||
171 | struct s3_conn *cxgb3i_c3cn_create(void); | ||
172 | int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *, | ||
173 | struct sockaddr_in *); | ||
174 | void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); | ||
175 | int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); | ||
176 | void cxgb3i_c3cn_release(struct s3_conn *); | ||
177 | |||
178 | /** | ||
179 | * cxgb3_skb_cb - control block for received pdu state and ULP mode management. | ||
180 | * | ||
181 | * @flag: see C3CB_FLAG_* below | ||
182 | * @ulp_mode: ULP mode/submode of sk_buff | ||
183 | * @seq: tcp sequence number | ||
184 | */ | ||
185 | struct cxgb3_skb_rx_cb { | ||
186 | __u32 ddigest; /* data digest */ | ||
187 | __u32 pdulen; /* recovered pdu length */ | ||
188 | }; | ||
189 | |||
190 | struct cxgb3_skb_tx_cb { | ||
191 | struct sk_buff *wr_next; /* next wr */ | ||
192 | }; | ||
193 | |||
194 | struct cxgb3_skb_cb { | ||
195 | __u8 flags; | ||
196 | __u8 ulp_mode; | ||
197 | __u32 seq; | ||
198 | union { | ||
199 | struct cxgb3_skb_rx_cb rx; | ||
200 | struct cxgb3_skb_tx_cb tx; | ||
201 | }; | ||
202 | }; | ||
203 | |||
204 | #define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0])) | ||
205 | #define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags) | ||
206 | #define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode) | ||
207 | #define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq) | ||
208 | #define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest) | ||
209 | #define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen) | ||
210 | #define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next) | ||
211 | |||
212 | enum c3cb_flags { | ||
213 | C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */ | ||
214 | C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */ | ||
215 | C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */ | ||
216 | }; | ||
217 | |||
218 | /** | ||
219 | * sge_opaque_hdr - | ||
220 | * Opaque version of structure the SGE stores at skb->head of TX_DATA packets | ||
221 | * and for which we must reserve space. | ||
222 | */ | ||
223 | struct sge_opaque_hdr { | ||
224 | void *dev; | ||
225 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
226 | }; | ||
227 | |||
228 | /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ | ||
229 | #define TX_HEADER_LEN \ | ||
230 | (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) | ||
231 | #define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN) | ||
232 | |||
233 | /* | ||
234 | * get and set private ip for iscsi traffic | ||
235 | */ | ||
236 | #define cxgb3i_get_private_ipv4addr(ndev) \ | ||
237 | (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) | ||
238 | #define cxgb3i_set_private_ipv4addr(ndev, addr) \ | ||
239 | (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr | ||
240 | |||
241 | /* max. connections per adapter */ | ||
242 | #define CXGB3I_MAX_CONN 16384 | ||
243 | #endif /* _CXGB3_OFFLOAD_H */ | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c deleted file mode 100644 index dc5e3e77a351..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c +++ /dev/null | |||
@@ -1,495 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * Copyright (c) 2008 Mike Christie | ||
6 | * Copyright (c) 2008 Red Hat, Inc. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation. | ||
11 | * | ||
12 | * Written by: Karen Xie (kxie@chelsio.com) | ||
13 | */ | ||
14 | |||
15 | #include <linux/slab.h> | ||
16 | #include <linux/skbuff.h> | ||
17 | #include <linux/crypto.h> | ||
18 | #include <scsi/scsi_cmnd.h> | ||
19 | #include <scsi/scsi_host.h> | ||
20 | |||
21 | #include "cxgb3i.h" | ||
22 | #include "cxgb3i_pdu.h" | ||
23 | |||
24 | #ifdef __DEBUG_CXGB3I_RX__ | ||
25 | #define cxgb3i_rx_debug cxgb3i_log_debug | ||
26 | #else | ||
27 | #define cxgb3i_rx_debug(fmt...) | ||
28 | #endif | ||
29 | |||
30 | #ifdef __DEBUG_CXGB3I_TX__ | ||
31 | #define cxgb3i_tx_debug cxgb3i_log_debug | ||
32 | #else | ||
33 | #define cxgb3i_tx_debug(fmt...) | ||
34 | #endif | ||
35 | |||
36 | /* always allocate rooms for AHS */ | ||
37 | #define SKB_TX_PDU_HEADER_LEN \ | ||
38 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) | ||
39 | static unsigned int skb_extra_headroom; | ||
40 | static struct page *pad_page; | ||
41 | |||
42 | /* | ||
43 | * pdu receive, interact with libiscsi_tcp | ||
44 | */ | ||
45 | static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb, | ||
46 | unsigned int offset, int offloaded) | ||
47 | { | ||
48 | int status = 0; | ||
49 | int bytes_read; | ||
50 | |||
51 | bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); | ||
52 | switch (status) { | ||
53 | case ISCSI_TCP_CONN_ERR: | ||
54 | return -EIO; | ||
55 | case ISCSI_TCP_SUSPENDED: | ||
56 | /* no transfer - just have caller flush queue */ | ||
57 | return bytes_read; | ||
58 | case ISCSI_TCP_SKB_DONE: | ||
59 | /* | ||
60 | * pdus should always fit in the skb and we should get | ||
61 | * segment done notifcation. | ||
62 | */ | ||
63 | iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); | ||
64 | return -EFAULT; | ||
65 | case ISCSI_TCP_SEGMENT_DONE: | ||
66 | return bytes_read; | ||
67 | default: | ||
68 | iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb " | ||
69 | "status %d\n", status); | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | } | ||
73 | |||
/*
 * cxgb3i_conn_read_pdu_skb - feed one received pdu skb to libiscsi_tcp
 * @conn: iscsi connection
 * @skb: skb holding one pdu (header first, then data unless DDP'ed)
 *
 * Checks the h/w digest error flags carried in the skb's ULP mode bits,
 * then passes first the header and then the payload to iscsi_tcp_recv_skb
 * via read_pdu_skb(). Returns 0 on success or a negative error.
 */
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
				    struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = 0;
	unsigned int offset;
	int rc;

	cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
			conn, skb, skb->len, skb_ulp_mode(skb));

	/* every skb must start at a pdu header boundary */
	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	/* h/w flagged a header digest error */
	if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	/* h/w flagged a data digest error */
	if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	/* iscsi hdr */
	rc = read_pdu_skb(conn, skb, 0, 0);
	if (rc <= 0)
		return rc;

	/* header-only pdu: nothing more to consume from this skb */
	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	offset = rc;
	if (conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	/* iscsi data */
	if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		/* payload already placed by DDP; libiscsi skips the copy */
		offloaded = 1;
	} else {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		/* non-ddp data is preceded by an extra cpl header */
		offset += sizeof(struct cpl_iscsi_hdr_norss);
	}

	rc = read_pdu_skb(conn, skb, offset, offloaded);
	if (rc < 0)
		return rc;
	else
		return 0;
}
137 | |||
138 | /* | ||
139 | * pdu transmit, interact with libiscsi_tcp | ||
140 | */ | ||
141 | static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) | ||
142 | { | ||
143 | u8 submode = 0; | ||
144 | |||
145 | if (hcrc) | ||
146 | submode |= 1; | ||
147 | if (dcrc) | ||
148 | submode |= 2; | ||
149 | skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode; | ||
150 | } | ||
151 | |||
/*
 * cxgb3i_conn_cleanup_task - release per-task tx resources.
 *
 * The cxgb3i private task data lives immediately after the generic
 * iscsi_tcp_task area inside task->dd_data. Frees the pre-allocated tx
 * skb (if the xmit callout never consumed it), returns the ddp itt and
 * defers the rest to libiscsi_tcp.
 */
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
	struct cxgb3i_task_data *tdata = task->dd_data +
					sizeof(struct iscsi_tcp_task);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(struct cxgb3i_task_data));

	/* MNC - Do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu has never been called on the task */
	cxgb3i_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
167 | |||
168 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, | ||
169 | unsigned int offset, unsigned int *off, | ||
170 | struct scatterlist **sgp) | ||
171 | { | ||
172 | int i; | ||
173 | struct scatterlist *sg; | ||
174 | |||
175 | for_each_sg(sgl, sg, sgcnt, i) { | ||
176 | if (offset < sg->length) { | ||
177 | *off = offset; | ||
178 | *sgp = sg; | ||
179 | return 0; | ||
180 | } | ||
181 | offset -= sg->length; | ||
182 | } | ||
183 | return -EFAULT; | ||
184 | } | ||
185 | |||
/*
 * sgl_read_to_frags - map @dlen bytes of a scatterlist into page frags
 * @sg: scatterlist entry where the data starts
 * @sgoffset: byte offset into @sg
 * @dlen: total # of bytes to map
 * @frags: output array of page fragments
 * @frag_max: capacity of @frags
 *
 * Ranges contiguous on the same page are merged into a single frag.
 * Returns the number of frags used, or -EINVAL if the scatterlist ends
 * early or more than @frag_max frags would be needed.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			/* current entry exhausted; advance to the next one */
			sg = sg_next(sg);
			if (!sg) {
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		/* same page and byte-contiguous: extend the previous frag */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
236 | |||
/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb for an iscsi pdu
 * @task: iscsi task
 * @opcode: iscsi pdu opcode
 *
 * Reserves TX_HEADER_LEN of headroom for the h/w TX_DATA_WR plus room
 * for the bhs and any AHS; for write-direction commands extra headroom
 * may be added so a small payload can later be copied inline.
 * Returns 0 or -ENOMEM.
 */
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	/* cxgb3i private data sits right after the iscsi_tcp_task area */
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	/* the bhs will be built directly in the skb's linear data */
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
271 | |||
/*
 * cxgb3i_conn_init_pdu - attach the payload to a pdu's tx skb
 * @task: iscsi task
 * @offset: byte offset of the payload within the scsi data buffer
 * @count: # of payload bytes
 *
 * The bhs was already written into the skb headroom by
 * cxgb3i_conn_alloc_pdu(). The payload is attached either by copying it
 * into the remaining headroom (when too many frags would be needed) or
 * by referencing the scatterlist pages as skb frags; the pdu is then
 * padded (iscsi_padding) using the shared zeroed pad_page.
 * Returns 0 or a negative error.
 */
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			 unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	/* commit the header bytes and set the digest/ULP submode bits */
	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		/* find the sg entry where the payload starts */
		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sdb->table.nents, tdata->offset,
					sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sdb->table.nents, tdata->offset,
					tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		/* too many frags for the skb (incl. one for padding):
		 * fall back to copying the payload into the headroom */
		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			/* padding is copied inline too, so none left over */
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		/* no scsi command: payload lives in task->data */
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		/* append pad bytes from the shared zeroed page */
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				 padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
372 | |||
373 | int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) | ||
374 | { | ||
375 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | ||
376 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | ||
377 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
378 | struct cxgb3i_task_data *tdata = tcp_task->dd_data; | ||
379 | struct sk_buff *skb = tdata->skb; | ||
380 | unsigned int datalen; | ||
381 | int err; | ||
382 | |||
383 | if (!skb) | ||
384 | return 0; | ||
385 | |||
386 | datalen = skb->data_len; | ||
387 | tdata->skb = NULL; | ||
388 | err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb); | ||
389 | if (err > 0) { | ||
390 | int pdulen = err; | ||
391 | |||
392 | cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", | ||
393 | task, skb, skb->len, skb->data_len, err); | ||
394 | |||
395 | if (task->conn->hdrdgst_en) | ||
396 | pdulen += ISCSI_DIGEST_SIZE; | ||
397 | if (datalen && task->conn->datadgst_en) | ||
398 | pdulen += ISCSI_DIGEST_SIZE; | ||
399 | |||
400 | task->conn->txdata_octets += pdulen; | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | if (err == -EAGAIN || err == -ENOBUFS) { | ||
405 | /* reset skb to send when we are called again */ | ||
406 | tdata->skb = skb; | ||
407 | return err; | ||
408 | } | ||
409 | |||
410 | kfree_skb(skb); | ||
411 | cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", | ||
412 | task->itt, skb, skb->len, skb->data_len, err); | ||
413 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); | ||
414 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); | ||
415 | return err; | ||
416 | } | ||
417 | |||
418 | int cxgb3i_pdu_init(void) | ||
419 | { | ||
420 | if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS)) | ||
421 | skb_extra_headroom = SKB_TX_HEADROOM; | ||
422 | pad_page = alloc_page(GFP_KERNEL); | ||
423 | if (!pad_page) | ||
424 | return -ENOMEM; | ||
425 | memset(page_address(pad_page), 0, PAGE_SIZE); | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | void cxgb3i_pdu_cleanup(void) | ||
430 | { | ||
431 | if (pad_page) { | ||
432 | __free_page(pad_page); | ||
433 | pad_page = NULL; | ||
434 | } | ||
435 | } | ||
436 | |||
/*
 * cxgb3i_conn_pdu_ready - drain the connection's receive queue
 * @c3cn: iscsi tcp connection
 *
 * Pops each queued pdu skb, feeds it to libiscsi via
 * cxgb3i_conn_read_pdu_skb(), then returns rx credits for the total
 * bytes consumed and fails the iscsi connection on the first error.
 */
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	/* callback_lock pins the opaque user context (see s3_conn) */
	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		/* pdulen recovered by the rx path, incl. digests/padding */
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	/* advance the read pointer and return rx credits to the h/w */
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
474 | |||
475 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn) | ||
476 | { | ||
477 | struct iscsi_conn *conn = c3cn->user_data; | ||
478 | |||
479 | cxgb3i_tx_debug("cn 0x%p.\n", c3cn); | ||
480 | if (conn) { | ||
481 | cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id); | ||
482 | iscsi_conn_queue_work(conn); | ||
483 | } | ||
484 | } | ||
485 | |||
486 | void cxgb3i_conn_closing(struct s3_conn *c3cn) | ||
487 | { | ||
488 | struct iscsi_conn *conn; | ||
489 | |||
490 | read_lock(&c3cn->callback_lock); | ||
491 | conn = c3cn->user_data; | ||
492 | if (conn && c3cn->state != C3CN_STATE_ESTABLISHED) | ||
493 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
494 | read_unlock(&c3cn->callback_lock); | ||
495 | } | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h deleted file mode 100644 index 0770b23d90da..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* | ||
2 | * cxgb3i_ulp2.h: Chelsio S3xx iSCSI driver. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #ifndef __CXGB3I_ULP2_PDU_H__ | ||
14 | #define __CXGB3I_ULP2_PDU_H__ | ||
15 | |||
16 | struct cpl_iscsi_hdr_norss { | ||
17 | union opcode_tid ot; | ||
18 | u16 pdu_len_ddp; | ||
19 | u16 len; | ||
20 | u32 seq; | ||
21 | u16 urg; | ||
22 | u8 rsvd; | ||
23 | u8 status; | ||
24 | }; | ||
25 | |||
26 | struct cpl_rx_data_ddp_norss { | ||
27 | union opcode_tid ot; | ||
28 | u16 urg; | ||
29 | u16 len; | ||
30 | u32 seq; | ||
31 | u32 nxt_seq; | ||
32 | u32 ulp_crc; | ||
33 | u32 ddp_status; | ||
34 | }; | ||
35 | |||
36 | #define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */ | ||
37 | #define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */ | ||
38 | #define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */ | ||
39 | #define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */ | ||
40 | #define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */ | ||
41 | #define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */ | ||
42 | #define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ | ||
43 | #define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ | ||
44 | #define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ | ||
45 | #define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */ | ||
46 | #define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */ | ||
47 | #define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ | ||
48 | #define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */ | ||
49 | |||
50 | #define ULP2_FLAG_DATA_READY 0x1 | ||
51 | #define ULP2_FLAG_DATA_DDPED 0x2 | ||
52 | #define ULP2_FLAG_HCRC_ERROR 0x10 | ||
53 | #define ULP2_FLAG_DCRC_ERROR 0x20 | ||
54 | #define ULP2_FLAG_PAD_ERROR 0x40 | ||
55 | |||
56 | void cxgb3i_conn_closing(struct s3_conn *c3cn); | ||
57 | void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn); | ||
58 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn); | ||
59 | #endif | ||
diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig index a470e389f6ba..17eb5d522f42 100644 --- a/drivers/scsi/cxgbi/Kconfig +++ b/drivers/scsi/cxgbi/Kconfig | |||
@@ -1 +1,2 @@ | |||
1 | source "drivers/scsi/cxgbi/cxgb3i/Kconfig" | ||
1 | source "drivers/scsi/cxgbi/cxgb4i/Kconfig" | 2 | source "drivers/scsi/cxgbi/cxgb4i/Kconfig" |
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile index 9e8f604888dc..86007e344955 100644 --- a/drivers/scsi/cxgbi/Makefile +++ b/drivers/scsi/cxgbi/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ | ||
1 | obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ | 2 | obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ |
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild index 70d060b7ff4f..09dbf9efc8ea 100644 --- a/drivers/scsi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild | |||
@@ -1,4 +1,3 @@ | |||
1 | EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 | 1 | EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 |
2 | 2 | ||
3 | cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o | ||
4 | obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o | 3 | obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o |
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index bfdcaf5c9c57..5cf4e9831f1b 100644 --- a/drivers/scsi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | config SCSI_CXGB3_ISCSI | 1 | config SCSI_CXGB3_ISCSI |
2 | tristate "Chelsio S3xx iSCSI support" | 2 | tristate "Chelsio T3 iSCSI support" |
3 | depends on CHELSIO_T3_DEPENDS | 3 | depends on CHELSIO_T3_DEPENDS |
4 | select CHELSIO_T3 | 4 | select CHELSIO_T3 |
5 | select SCSI_ISCSI_ATTRS | 5 | select SCSI_ISCSI_ATTRS |
6 | ---help--- | 6 | ---help--- |
7 | This driver supports iSCSI offload for the Chelsio S3 series devices. | 7 | This driver supports iSCSI offload for the Chelsio T3 devices. |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c new file mode 100644 index 000000000000..a01c1e238938 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -0,0 +1,1432 @@ | |||
1 | /* | ||
2 | * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management | ||
3 | * | ||
4 | * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
7 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
8 | * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this | ||
9 | * release for licensing terms and conditions. | ||
10 | * | ||
11 | * Written by: Dimitris Michailidis (dm@chelsio.com) | ||
12 | * Karen Xie (kxie@chelsio.com) | ||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | ||
16 | |||
17 | #include <linux/version.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/moduleparam.h> | ||
20 | #include <scsi/scsi_host.h> | ||
21 | |||
22 | #include "common.h" | ||
23 | #include "t3_cpl.h" | ||
24 | #include "t3cdev.h" | ||
25 | #include "cxgb3_defs.h" | ||
26 | #include "cxgb3_ctl_defs.h" | ||
27 | #include "cxgb3_offload.h" | ||
28 | #include "firmware_exports.h" | ||
29 | #include "cxgb3i.h" | ||
30 | |||
31 | static unsigned int dbg_level; | ||
32 | #include "../libcxgbi.h" | ||
33 | |||
34 | #define DRV_MODULE_NAME "cxgb3i" | ||
35 | #define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" | ||
36 | #define DRV_MODULE_VERSION "2.0.0" | ||
37 | #define DRV_MODULE_RELDATE "Jun. 2010" | ||
38 | |||
39 | static char version[] = | ||
40 | DRV_MODULE_DESC " " DRV_MODULE_NAME | ||
41 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
42 | |||
43 | MODULE_AUTHOR("Chelsio Communications, Inc."); | ||
44 | MODULE_DESCRIPTION(DRV_MODULE_DESC); | ||
45 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
46 | MODULE_LICENSE("GPL"); | ||
47 | |||
48 | module_param(dbg_level, uint, 0644); | ||
49 | MODULE_PARM_DESC(dbg_level, "debug flag (default=0)"); | ||
50 | |||
51 | static int cxgb3i_rcv_win = 256 * 1024; | ||
52 | module_param(cxgb3i_rcv_win, int, 0644); | ||
53 | MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)"); | ||
54 | |||
55 | static int cxgb3i_snd_win = 128 * 1024; | ||
56 | module_param(cxgb3i_snd_win, int, 0644); | ||
57 | MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); | ||
58 | |||
59 | static int cxgb3i_rx_credit_thres = 10 * 1024; | ||
60 | module_param(cxgb3i_rx_credit_thres, int, 0644); | ||
61 | MODULE_PARM_DESC(rx_credit_thres, | ||
62 | "RX credits return threshold in bytes (default=10KB)"); | ||
63 | |||
64 | static unsigned int cxgb3i_max_connect = 8 * 1024; | ||
65 | module_param(cxgb3i_max_connect, uint, 0644); | ||
66 | MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8092)"); | ||
67 | |||
68 | static unsigned int cxgb3i_sport_base = 20000; | ||
69 | module_param(cxgb3i_sport_base, uint, 0644); | ||
70 | MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)"); | ||
71 | |||
72 | static void cxgb3i_dev_open(struct t3cdev *); | ||
73 | static void cxgb3i_dev_close(struct t3cdev *); | ||
74 | static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32); | ||
75 | |||
76 | static struct cxgb3_client t3_client = { | ||
77 | .name = DRV_MODULE_NAME, | ||
78 | .handlers = cxgb3i_cpl_handlers, | ||
79 | .add = cxgb3i_dev_open, | ||
80 | .remove = cxgb3i_dev_close, | ||
81 | .event_handler = cxgb3i_dev_event_handler, | ||
82 | }; | ||
83 | |||
84 | static struct scsi_host_template cxgb3i_host_template = { | ||
85 | .module = THIS_MODULE, | ||
86 | .name = DRV_MODULE_NAME, | ||
87 | .proc_name = DRV_MODULE_NAME, | ||
88 | .can_queue = CXGB3I_SCSI_HOST_QDEPTH, | ||
89 | .queuecommand = iscsi_queuecommand, | ||
90 | .change_queue_depth = iscsi_change_queue_depth, | ||
91 | .sg_tablesize = SG_ALL, | ||
92 | .max_sectors = 0xFFFF, | ||
93 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, | ||
94 | .eh_abort_handler = iscsi_eh_abort, | ||
95 | .eh_device_reset_handler = iscsi_eh_device_reset, | ||
96 | .eh_target_reset_handler = iscsi_eh_recover_target, | ||
97 | .target_alloc = iscsi_target_alloc, | ||
98 | .use_clustering = DISABLE_CLUSTERING, | ||
99 | .this_id = -1, | ||
100 | }; | ||
101 | |||
102 | static struct iscsi_transport cxgb3i_iscsi_transport = { | ||
103 | .owner = THIS_MODULE, | ||
104 | .name = DRV_MODULE_NAME, | ||
105 | /* owner and name should be set already */ | ||
106 | .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | ||
107 | | CAP_DATADGST | CAP_DIGEST_OFFLOAD | | ||
108 | CAP_PADDING_OFFLOAD, | ||
109 | .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | | ||
110 | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | | ||
111 | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | | ||
112 | ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | | ||
113 | ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | | ||
114 | ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | | ||
115 | ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | | ||
116 | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | | ||
117 | ISCSI_PERSISTENT_ADDRESS | | ||
118 | ISCSI_TARGET_NAME | ISCSI_TPGT | | ||
119 | ISCSI_USERNAME | ISCSI_PASSWORD | | ||
120 | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | | ||
121 | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | | ||
122 | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | | ||
123 | ISCSI_PING_TMO | ISCSI_RECV_TMO | | ||
124 | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, | ||
125 | .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | | ||
126 | ISCSI_HOST_INITIATOR_NAME | | ||
127 | ISCSI_HOST_NETDEV_NAME, | ||
128 | .get_host_param = cxgbi_get_host_param, | ||
129 | .set_host_param = cxgbi_set_host_param, | ||
130 | /* session management */ | ||
131 | .create_session = cxgbi_create_session, | ||
132 | .destroy_session = cxgbi_destroy_session, | ||
133 | .get_session_param = iscsi_session_get_param, | ||
134 | /* connection management */ | ||
135 | .create_conn = cxgbi_create_conn, | ||
136 | .bind_conn = cxgbi_bind_conn, | ||
137 | .destroy_conn = iscsi_tcp_conn_teardown, | ||
138 | .start_conn = iscsi_conn_start, | ||
139 | .stop_conn = iscsi_conn_stop, | ||
140 | .get_conn_param = cxgbi_get_conn_param, | ||
141 | .set_param = cxgbi_set_conn_param, | ||
142 | .get_stats = cxgbi_get_conn_stats, | ||
143 | /* pdu xmit req from user space */ | ||
144 | .send_pdu = iscsi_conn_send_pdu, | ||
145 | /* task */ | ||
146 | .init_task = iscsi_tcp_task_init, | ||
147 | .xmit_task = iscsi_tcp_task_xmit, | ||
148 | .cleanup_task = cxgbi_cleanup_task, | ||
149 | /* pdu */ | ||
150 | .alloc_pdu = cxgbi_conn_alloc_pdu, | ||
151 | .init_pdu = cxgbi_conn_init_pdu, | ||
152 | .xmit_pdu = cxgbi_conn_xmit_pdu, | ||
153 | .parse_pdu_itt = cxgbi_parse_pdu_itt, | ||
154 | /* TCP connect/disconnect */ | ||
155 | .ep_connect = cxgbi_ep_connect, | ||
156 | .ep_poll = cxgbi_ep_poll, | ||
157 | .ep_disconnect = cxgbi_ep_disconnect, | ||
158 | /* Error recovery timeout call */ | ||
159 | .session_recovery_timedout = iscsi_session_recovery_timedout, | ||
160 | }; | ||
161 | |||
162 | static struct scsi_transport_template *cxgb3i_stt; | ||
163 | |||
164 | /* | ||
165 | * CPL (Chelsio Protocol Language) defines a message passing interface between | ||
166 | * the host driver and Chelsio asic. | ||
167 | * The section below implments CPLs that related to iscsi tcp connection | ||
168 | * open/close/abort and data send/receive. | ||
169 | */ | ||
170 | |||
171 | static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); | ||
172 | |||
173 | static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, | ||
174 | const struct l2t_entry *e) | ||
175 | { | ||
176 | unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win); | ||
177 | struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; | ||
178 | |||
179 | skb->priority = CPL_PRIORITY_SETUP; | ||
180 | |||
181 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | ||
182 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); | ||
183 | req->local_port = csk->saddr.sin_port; | ||
184 | req->peer_port = csk->daddr.sin_port; | ||
185 | req->local_ip = csk->saddr.sin_addr.s_addr; | ||
186 | req->peer_ip = csk->daddr.sin_addr.s_addr; | ||
187 | |||
188 | req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS | | ||
189 | V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | | ||
190 | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); | ||
191 | req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | | ||
192 | V_RCV_BUFSIZ(cxgb3i_rcv_win>>10)); | ||
193 | |||
194 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
195 | "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", | ||
196 | csk, csk->state, csk->flags, csk->atid, | ||
197 | &req->local_ip, ntohs(req->local_port), | ||
198 | &req->peer_ip, ntohs(req->peer_port), | ||
199 | csk->mss_idx, e->idx, e->smt_idx); | ||
200 | |||
201 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | ||
202 | } | ||
203 | |||
204 | static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) | ||
205 | { | ||
206 | cxgbi_sock_act_open_req_arp_failure(NULL, skb); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * CPL connection close request: host -> | ||
211 | * | ||
212 | * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to | ||
213 | * the write queue (i.e., after any unsent txt data). | ||
214 | */ | ||
215 | static void send_close_req(struct cxgbi_sock *csk) | ||
216 | { | ||
217 | struct sk_buff *skb = csk->cpl_close; | ||
218 | struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; | ||
219 | unsigned int tid = csk->tid; | ||
220 | |||
221 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
222 | "csk 0x%p,%u,0x%lx,%u.\n", | ||
223 | csk, csk->state, csk->flags, csk->tid); | ||
224 | |||
225 | csk->cpl_close = NULL; | ||
226 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); | ||
227 | req->wr.wr_lo = htonl(V_WR_TID(tid)); | ||
228 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); | ||
229 | req->rsvd = htonl(csk->write_seq); | ||
230 | |||
231 | cxgbi_sock_skb_entail(csk, skb); | ||
232 | if (csk->state >= CTP_ESTABLISHED) | ||
233 | push_tx_frames(csk, 1); | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * CPL connection abort request: host -> | ||
238 | * | ||
239 | * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs | ||
240 | * for the same connection and also that we do not try to send a message | ||
241 | * after the connection has closed. | ||
242 | */ | ||
243 | static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) | ||
244 | { | ||
245 | struct cpl_abort_req *req = cplhdr(skb); | ||
246 | |||
247 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
248 | "t3dev 0x%p, tid %u, skb 0x%p.\n", | ||
249 | tdev, GET_TID(req), skb); | ||
250 | req->cmd = CPL_ABORT_NO_RST; | ||
251 | cxgb3_ofld_send(tdev, skb); | ||
252 | } | ||
253 | |||
254 | static void send_abort_req(struct cxgbi_sock *csk) | ||
255 | { | ||
256 | struct sk_buff *skb = csk->cpl_abort_req; | ||
257 | struct cpl_abort_req *req; | ||
258 | |||
259 | if (unlikely(csk->state == CTP_ABORTING || !skb)) | ||
260 | return; | ||
261 | cxgbi_sock_set_state(csk, CTP_ABORTING); | ||
262 | cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); | ||
263 | /* Purge the send queue so we don't send anything after an abort. */ | ||
264 | cxgbi_sock_purge_write_queue(csk); | ||
265 | |||
266 | csk->cpl_abort_req = NULL; | ||
267 | req = (struct cpl_abort_req *)skb->head; | ||
268 | skb->priority = CPL_PRIORITY_DATA; | ||
269 | set_arp_failure_handler(skb, abort_arp_failure); | ||
270 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); | ||
271 | req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); | ||
272 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); | ||
273 | req->rsvd0 = htonl(csk->snd_nxt); | ||
274 | req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); | ||
275 | req->cmd = CPL_ABORT_SEND_RST; | ||
276 | |||
277 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
278 | "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", | ||
279 | csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, | ||
280 | req->rsvd1); | ||
281 | |||
282 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * CPL connection abort reply: host -> | ||
287 | * | ||
288 | * Send an ABORT_RPL message in response of the ABORT_REQ received. | ||
289 | */ | ||
290 | static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) | ||
291 | { | ||
292 | struct sk_buff *skb = csk->cpl_abort_rpl; | ||
293 | struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; | ||
294 | |||
295 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
296 | "csk 0x%p,%u,0x%lx,%u, status %d.\n", | ||
297 | csk, csk->state, csk->flags, csk->tid, rst_status); | ||
298 | |||
299 | csk->cpl_abort_rpl = NULL; | ||
300 | skb->priority = CPL_PRIORITY_DATA; | ||
301 | rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); | ||
302 | rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); | ||
303 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); | ||
304 | rpl->cmd = rst_status; | ||
305 | cxgb3_ofld_send(csk->cdev->lldev, skb); | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * CPL connection rx data ack: host -> | ||
310 | * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of | ||
311 | * credits sent. | ||
312 | */ | ||
313 | static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) | ||
314 | { | ||
315 | struct sk_buff *skb; | ||
316 | struct cpl_rx_data_ack *req; | ||
317 | u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); | ||
318 | |||
319 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | ||
320 | "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", | ||
321 | csk, csk->state, csk->flags, csk->tid, credits, dack); | ||
322 | |||
323 | skb = alloc_cpl(sizeof(*req), 0, GFP_ATOMIC); | ||
324 | if (!skb) { | ||
325 | pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); | ||
326 | return 0; | ||
327 | } | ||
328 | req = (struct cpl_rx_data_ack *)skb->head; | ||
329 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | ||
330 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); | ||
331 | req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) | | ||
332 | V_RX_CREDITS(credits)); | ||
333 | skb->priority = CPL_PRIORITY_ACK; | ||
334 | cxgb3_ofld_send(csk->cdev->lldev, skb); | ||
335 | return credits; | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * CPL connection tx data: host -> | ||
340 | * | ||
341 | * Send iscsi PDU via TX_DATA CPL message. Returns the number of | ||
342 | * credits sent. | ||
343 | * Each TX_DATA consumes work request credit (wrs), so we need to keep track of | ||
344 | * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). | ||
345 | */ | ||
346 | |||
347 | static unsigned int wrlen __read_mostly; | ||
348 | static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; | ||
349 | |||
350 | static void init_wr_tab(unsigned int wr_len) | ||
351 | { | ||
352 | int i; | ||
353 | |||
354 | if (skb_wrs[1]) /* already initialized */ | ||
355 | return; | ||
356 | for (i = 1; i < SKB_WR_LIST_SIZE; i++) { | ||
357 | int sgl_len = (3 * i) / 2 + (i & 1); | ||
358 | |||
359 | sgl_len += 3; | ||
360 | skb_wrs[i] = (sgl_len <= wr_len | ||
361 | ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); | ||
362 | } | ||
363 | wrlen = wr_len * 8; | ||
364 | } | ||
365 | |||
366 | static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, | ||
367 | int len, int req_completion) | ||
368 | { | ||
369 | struct tx_data_wr *req; | ||
370 | struct l2t_entry *l2t = csk->l2t; | ||
371 | |||
372 | skb_reset_transport_header(skb); | ||
373 | req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); | ||
374 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | | ||
375 | (req_completion ? F_WR_COMPL : 0)); | ||
376 | req->wr_lo = htonl(V_WR_TID(csk->tid)); | ||
377 | /* len includes the length of any HW ULP additions */ | ||
378 | req->len = htonl(len); | ||
379 | /* V_TX_ULP_SUBMODE sets both the mode and submode */ | ||
380 | req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) | | ||
381 | V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); | ||
382 | req->sndseq = htonl(csk->snd_nxt); | ||
383 | req->param = htonl(V_TX_PORT(l2t->smt_idx)); | ||
384 | |||
385 | if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { | ||
386 | req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | | ||
387 | V_TX_CPU_IDX(csk->rss_qid)); | ||
388 | /* sendbuffer is in units of 32KB. */ | ||
389 | req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15)); | ||
390 | cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); | ||
391 | } | ||
392 | } | ||
393 | |||
394 | /** | ||
395 | * push_tx_frames -- start transmit | ||
396 | * @c3cn: the offloaded connection | ||
397 | * @req_completion: request wr_ack or not | ||
398 | * | ||
399 | * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a | ||
400 | * connection's send queue and sends them on to T3. Must be called with the | ||
401 | * connection's lock held. Returns the amount of send buffer space that was | ||
402 | * freed as a result of sending queued data to T3. | ||
403 | */ | ||
404 | |||
405 | static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) | ||
406 | { | ||
407 | kfree_skb(skb); | ||
408 | } | ||
409 | |||
410 | static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) | ||
411 | { | ||
412 | int total_size = 0; | ||
413 | struct sk_buff *skb; | ||
414 | |||
415 | if (unlikely(csk->state < CTP_ESTABLISHED || | ||
416 | csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { | ||
417 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | ||
418 | "csk 0x%p,%u,0x%lx,%u, in closing state.\n", | ||
419 | csk, csk->state, csk->flags, csk->tid); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { | ||
424 | int len = skb->len; /* length before skb_push */ | ||
425 | int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); | ||
426 | int wrs_needed = skb_wrs[frags]; | ||
427 | |||
428 | if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) | ||
429 | wrs_needed = 1; | ||
430 | |||
431 | WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); | ||
432 | |||
433 | if (csk->wr_cred < wrs_needed) { | ||
434 | log_debug(1 << CXGBI_DBG_PDU_TX, | ||
435 | "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n", | ||
436 | csk, skb->len, skb->data_len, frags, | ||
437 | wrs_needed, csk->wr_cred); | ||
438 | break; | ||
439 | } | ||
440 | |||
441 | __skb_unlink(skb, &csk->write_queue); | ||
442 | skb->priority = CPL_PRIORITY_DATA; | ||
443 | skb->csum = wrs_needed; /* remember this until the WR_ACK */ | ||
444 | csk->wr_cred -= wrs_needed; | ||
445 | csk->wr_una_cred += wrs_needed; | ||
446 | cxgbi_sock_enqueue_wr(csk, skb); | ||
447 | |||
448 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | ||
449 | "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, " | ||
450 | "left %u, unack %u.\n", | ||
451 | csk, skb->len, skb->data_len, frags, skb->csum, | ||
452 | csk->wr_cred, csk->wr_una_cred); | ||
453 | |||
454 | if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { | ||
455 | if ((req_completion && | ||
456 | csk->wr_una_cred == wrs_needed) || | ||
457 | csk->wr_una_cred >= csk->wr_max_cred / 2) { | ||
458 | req_completion = 1; | ||
459 | csk->wr_una_cred = 0; | ||
460 | } | ||
461 | len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); | ||
462 | make_tx_data_wr(csk, skb, len, req_completion); | ||
463 | csk->snd_nxt += len; | ||
464 | cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); | ||
465 | } | ||
466 | total_size += skb->truesize; | ||
467 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, | ||
468 | "csk 0x%p, tid 0x%x, send skb 0x%p.\n", | ||
469 | csk, csk->tid, skb); | ||
470 | set_arp_failure_handler(skb, arp_failure_skb_discard); | ||
471 | l2t_send(csk->cdev->lldev, skb, csk->l2t); | ||
472 | } | ||
473 | return total_size; | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Process a CPL_ACT_ESTABLISH message: -> host | ||
478 | * Updates connection state from an active establish CPL message. Runs with | ||
479 | * the connection lock held. | ||
480 | */ | ||
481 | |||
482 | static inline void free_atid(struct cxgbi_sock *csk) | ||
483 | { | ||
484 | if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { | ||
485 | cxgb3_free_atid(csk->cdev->lldev, csk->atid); | ||
486 | cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); | ||
487 | cxgbi_sock_put(csk); | ||
488 | } | ||
489 | } | ||
490 | |||
491 | static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | ||
492 | { | ||
493 | struct cxgbi_sock *csk = ctx; | ||
494 | struct cpl_act_establish *req = cplhdr(skb); | ||
495 | unsigned int tid = GET_TID(req); | ||
496 | unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); | ||
497 | u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ | ||
498 | |||
499 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
500 | "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", | ||
501 | atid, atid, csk, csk->state, csk->flags, rcv_isn); | ||
502 | |||
503 | cxgbi_sock_get(csk); | ||
504 | cxgbi_sock_set_flag(csk, CTPF_HAS_TID); | ||
505 | csk->tid = tid; | ||
506 | cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); | ||
507 | |||
508 | free_atid(csk); | ||
509 | |||
510 | csk->rss_qid = G_QNUM(ntohs(skb->csum)); | ||
511 | |||
512 | spin_lock_bh(&csk->lock); | ||
513 | if (csk->retry_timer.function) { | ||
514 | del_timer(&csk->retry_timer); | ||
515 | csk->retry_timer.function = NULL; | ||
516 | } | ||
517 | |||
518 | if (unlikely(csk->state != CTP_ACTIVE_OPEN)) | ||
519 | pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", | ||
520 | csk, csk->state, csk->flags, csk->tid); | ||
521 | |||
522 | csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; | ||
523 | if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10)) | ||
524 | csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10); | ||
525 | |||
526 | cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); | ||
527 | |||
528 | if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) | ||
529 | /* upper layer has requested closing */ | ||
530 | send_abort_req(csk); | ||
531 | else { | ||
532 | if (skb_queue_len(&csk->write_queue)) | ||
533 | push_tx_frames(csk, 1); | ||
534 | cxgbi_conn_tx_open(csk); | ||
535 | } | ||
536 | |||
537 | spin_unlock_bh(&csk->lock); | ||
538 | __kfree_skb(skb); | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * Process a CPL_ACT_OPEN_RPL message: -> host | ||
544 | * Handle active open failures. | ||
545 | */ | ||
546 | static int act_open_rpl_status_to_errno(int status) | ||
547 | { | ||
548 | switch (status) { | ||
549 | case CPL_ERR_CONN_RESET: | ||
550 | return -ECONNREFUSED; | ||
551 | case CPL_ERR_ARP_MISS: | ||
552 | return -EHOSTUNREACH; | ||
553 | case CPL_ERR_CONN_TIMEDOUT: | ||
554 | return -ETIMEDOUT; | ||
555 | case CPL_ERR_TCAM_FULL: | ||
556 | return -ENOMEM; | ||
557 | case CPL_ERR_CONN_EXIST: | ||
558 | return -EADDRINUSE; | ||
559 | default: | ||
560 | return -EIO; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | static void act_open_retry_timer(unsigned long data) | ||
565 | { | ||
566 | struct sk_buff *skb; | ||
567 | struct cxgbi_sock *csk = (struct cxgbi_sock *)data; | ||
568 | |||
569 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
570 | "csk 0x%p,%u,0x%lx,%u.\n", | ||
571 | csk, csk->state, csk->flags, csk->tid); | ||
572 | |||
573 | cxgbi_sock_get(csk); | ||
574 | spin_lock_bh(&csk->lock); | ||
575 | skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); | ||
576 | if (!skb) | ||
577 | cxgbi_sock_fail_act_open(csk, -ENOMEM); | ||
578 | else { | ||
579 | skb->sk = (struct sock *)csk; | ||
580 | set_arp_failure_handler(skb, act_open_arp_failure); | ||
581 | send_act_open_req(csk, skb, csk->l2t); | ||
582 | } | ||
583 | spin_unlock_bh(&csk->lock); | ||
584 | cxgbi_sock_put(csk); | ||
585 | } | ||
586 | |||
587 | static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | ||
588 | { | ||
589 | struct cxgbi_sock *csk = ctx; | ||
590 | struct cpl_act_open_rpl *rpl = cplhdr(skb); | ||
591 | |||
592 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
593 | "csk 0x%p,%u,0x%lx,%u, status %u.\n", | ||
594 | csk, csk->state, csk->flags, csk->atid, rpl->status); | ||
595 | |||
596 | if (rpl->status != CPL_ERR_TCAM_FULL && | ||
597 | rpl->status != CPL_ERR_CONN_EXIST && | ||
598 | rpl->status != CPL_ERR_ARP_MISS) | ||
599 | cxgb3_queue_tid_release(tdev, GET_TID(rpl)); | ||
600 | |||
601 | cxgbi_sock_get(csk); | ||
602 | spin_lock_bh(&csk->lock); | ||
603 | if (rpl->status == CPL_ERR_CONN_EXIST && | ||
604 | csk->retry_timer.function != act_open_retry_timer) { | ||
605 | csk->retry_timer.function = act_open_retry_timer; | ||
606 | mod_timer(&csk->retry_timer, jiffies + HZ / 2); | ||
607 | } else | ||
608 | cxgbi_sock_fail_act_open(csk, | ||
609 | act_open_rpl_status_to_errno(rpl->status)); | ||
610 | |||
611 | spin_unlock_bh(&csk->lock); | ||
612 | cxgbi_sock_put(csk); | ||
613 | __kfree_skb(skb); | ||
614 | return 0; | ||
615 | } | ||
616 | |||
617 | /* | ||
618 | * Process PEER_CLOSE CPL messages: -> host | ||
619 | * Handle peer FIN. | ||
620 | */ | ||
621 | static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
622 | { | ||
623 | struct cxgbi_sock *csk = ctx; | ||
624 | |||
625 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
626 | "csk 0x%p,%u,0x%lx,%u.\n", | ||
627 | csk, csk->state, csk->flags, csk->tid); | ||
628 | |||
629 | cxgbi_sock_rcv_peer_close(csk); | ||
630 | __kfree_skb(skb); | ||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Process CLOSE_CONN_RPL CPL message: -> host | ||
636 | * Process a peer ACK to our FIN. | ||
637 | */ | ||
638 | static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, | ||
639 | void *ctx) | ||
640 | { | ||
641 | struct cxgbi_sock *csk = ctx; | ||
642 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | ||
643 | |||
644 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
645 | "csk 0x%p,%u,0x%lx,%u, snxt %u.\n", | ||
646 | csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); | ||
647 | |||
648 | cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); | ||
649 | __kfree_skb(skb); | ||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | /* | ||
654 | * Process ABORT_REQ_RSS CPL message: -> host | ||
655 | * Process abort requests. If we are waiting for an ABORT_RPL we ignore this | ||
656 | * request except that we need to reply to it. | ||
657 | */ | ||
658 | |||
659 | static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, | ||
660 | int *need_rst) | ||
661 | { | ||
662 | switch (abort_reason) { | ||
663 | case CPL_ERR_BAD_SYN: /* fall through */ | ||
664 | case CPL_ERR_CONN_RESET: | ||
665 | return csk->state > CTP_ESTABLISHED ? | ||
666 | -EPIPE : -ECONNRESET; | ||
667 | case CPL_ERR_XMIT_TIMEDOUT: | ||
668 | case CPL_ERR_PERSIST_TIMEDOUT: | ||
669 | case CPL_ERR_FINWAIT2_TIMEDOUT: | ||
670 | case CPL_ERR_KEEPALIVE_TIMEDOUT: | ||
671 | return -ETIMEDOUT; | ||
672 | default: | ||
673 | return -EIO; | ||
674 | } | ||
675 | } | ||
676 | |||
/*
 * Process an incoming ABORT_REQ_RSS for this connection.  Takes the
 * socket lock; the first request only records state, a repeated request
 * is answered with an ABORT_RPL.
 */
static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	/* Negative advice is not a real abort; just drop the message. */
	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	/* Hold a ref across the locked section so the csk cannot go away. */
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* First ABORT_REQ seen: record it, move to ABORTING and defer the
	 * reply until the request is seen again. */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	/* Second pass: acknowledge the abort to the h/w.  NOTE(review):
	 * rst_status is still CPL_ABORT_NO_RST here; abort_status_to_errno()
	 * runs only after the reply is sent, so its need_rst output never
	 * reaches send_abort_rpl() — confirm this matches f/w expectations. */
	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	/* If we are not waiting for our own ABORT_RPL, tear down now. */
	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}
716 | |||
717 | /* | ||
718 | * Process ABORT_RPL_RSS CPL message: -> host | ||
719 | * Process abort replies. We only process these messages if we anticipate | ||
720 | * them as the coordination between SW and HW in this area is somewhat lacking | ||
721 | * and sometimes we get ABORT_RPLs after we are done with the connection that | ||
722 | * originated the ABORT_REQ. | ||
723 | */ | ||
724 | static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
725 | { | ||
726 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | ||
727 | struct cxgbi_sock *csk = ctx; | ||
728 | |||
729 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
730 | "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", | ||
731 | rpl->status, csk, csk ? csk->state : 0, | ||
732 | csk ? csk->flags : 0UL); | ||
733 | /* | ||
734 | * Ignore replies to post-close aborts indicating that the abort was | ||
735 | * requested too late. These connections are terminated when we get | ||
736 | * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss | ||
737 | * arrives the TID is either no longer used or it has been recycled. | ||
738 | */ | ||
739 | if (rpl->status == CPL_ERR_ABORT_FAILED) | ||
740 | goto rel_skb; | ||
741 | /* | ||
742 | * Sometimes we've already closed the connection, e.g., a post-close | ||
743 | * abort races with ABORT_REQ_RSS, the latter frees the connection | ||
744 | * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, | ||
745 | * but FW turns the ABORT_REQ into a regular one and so we get | ||
746 | * ABORT_RPL_RSS with status 0 and no connection. | ||
747 | */ | ||
748 | if (csk) | ||
749 | cxgbi_sock_rcv_abort_rpl(csk); | ||
750 | rel_skb: | ||
751 | __kfree_skb(skb); | ||
752 | return 0; | ||
753 | } | ||
754 | |||
/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
 * follow after the bhs.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	/* Connection is closing down: abort unless an abort is already in
	 * flight, in which case just drop the pdu. */
	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	/* Stash the TCP sequence number and start with a clean flag set. */
	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* Strip the CPL header so skb->data points at the iSCSI BHS. */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

	/* The RX_DATA_DDP status CPL rides at the tail of the coalesced skb;
	 * copy it out before trimming. */
	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	/* Record per-pdu digest/padding errors reported by the h/w. */
	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	/* More than header + trailing status present: the payload was not
	 * DDP'ed and follows inline after its own norss CPL header. */
	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	/* Advance rcv_nxt past the whole pdu, trim the trailing status CPL
	 * and queue the skb for the iscsi layer. */
	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}
858 | |||
859 | /* | ||
860 | * Process TX_DATA_ACK CPL messages: -> host | ||
861 | * Process an acknowledgment of WR completion. Advance snd_una and send the | ||
862 | * next batch of work requests from the write queue. | ||
863 | */ | ||
864 | static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) | ||
865 | { | ||
866 | struct cxgbi_sock *csk = ctx; | ||
867 | struct cpl_wr_ack *hdr = cplhdr(skb); | ||
868 | |||
869 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, | ||
870 | "csk 0x%p,%u,0x%lx,%u, cr %u.\n", | ||
871 | csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); | ||
872 | |||
873 | cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); | ||
874 | __kfree_skb(skb); | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | /* | ||
879 | * for each connection, pre-allocate skbs needed for close/abort requests. So | ||
880 | * that we can service the request right away. | ||
881 | */ | ||
882 | static int alloc_cpls(struct cxgbi_sock *csk) | ||
883 | { | ||
884 | csk->cpl_close = alloc_cpl(sizeof(struct cpl_close_con_req), 0, | ||
885 | GFP_KERNEL); | ||
886 | if (!csk->cpl_close) | ||
887 | return -ENOMEM; | ||
888 | csk->cpl_abort_req = alloc_cpl(sizeof(struct cpl_abort_req), 0, | ||
889 | GFP_KERNEL); | ||
890 | if (!csk->cpl_abort_req) | ||
891 | goto free_cpl_skbs; | ||
892 | |||
893 | csk->cpl_abort_rpl = alloc_cpl(sizeof(struct cpl_abort_rpl), 0, | ||
894 | GFP_KERNEL); | ||
895 | if (!csk->cpl_abort_rpl) | ||
896 | goto free_cpl_skbs; | ||
897 | |||
898 | return 0; | ||
899 | |||
900 | free_cpl_skbs: | ||
901 | cxgbi_sock_free_cpl_skbs(csk); | ||
902 | return -ENOMEM; | ||
903 | } | ||
904 | |||
/*
 * l2t_put - release the connection's L2T entry, if any.
 * @csk: the offloaded iscsi tcp connection.
 * Drops the socket reference that was taken when the entry was acquired.
 * (The kerneldoc previously here described release_offload_resources().)
 */
static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(L2DATA(t3dev), csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}
920 | |||
/**
 * release_offload_resources - release offload resource
 * @csk: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection: pre-allocated CPL
 * skbs, pending write requests, the L2T entry and the TID/ATID.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	/* outstanding (un-acked) WRs: drop them and reset the credit list */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	/* an ATID (open in progress) and a TID (established) each hold a
	 * csk reference; release whichever one this connection owns */
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
947 | |||
/*
 * Start an active-open: acquire an L2T entry and an ATID, build the
 * CPL_ACT_OPEN_REQ and hand it to the h/w.  Returns 0 on success or
 * -EINVAL on any resource failure (partially acquired resources are
 * released later via release_offload_resources()).
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	/* reference held by the L2T entry, dropped in l2t_put() */
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	/* reference held by the ATID, dropped in free_atid() */
	cxgbi_sock_get(csk);

	skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	/* stash csk in skb->sk so the ARP failure handler can find it */
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);

	/* initialize WR credits from the adapter limit */
	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
996 | |||
/* CPL message dispatch table, indexed by CPL opcode; registered with the
 * cxgb3 core so incoming offload messages reach the handlers above. */
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};
1007 | |||
1008 | /** | ||
1009 | * cxgb3i_ofld_init - allocate and initialize resources for each adapter found | ||
1010 | * @cdev: cxgbi adapter | ||
1011 | */ | ||
1012 | int cxgb3i_ofld_init(struct cxgbi_device *cdev) | ||
1013 | { | ||
1014 | struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; | ||
1015 | struct adap_ports port; | ||
1016 | struct ofld_page_info rx_page_info; | ||
1017 | unsigned int wr_len; | ||
1018 | int rc; | ||
1019 | |||
1020 | if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 || | ||
1021 | t3dev->ctl(t3dev, GET_PORTS, &port) < 0 || | ||
1022 | t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { | ||
1023 | pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev); | ||
1024 | return -EINVAL; | ||
1025 | } | ||
1026 | |||
1027 | if (cxgb3i_max_connect > CXGBI_MAX_CONN) | ||
1028 | cxgb3i_max_connect = CXGBI_MAX_CONN; | ||
1029 | |||
1030 | rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base, | ||
1031 | cxgb3i_max_connect); | ||
1032 | if (rc < 0) | ||
1033 | return rc; | ||
1034 | |||
1035 | init_wr_tab(wr_len); | ||
1036 | cdev->csk_release_offload_resources = release_offload_resources; | ||
1037 | cdev->csk_push_tx_frames = push_tx_frames; | ||
1038 | cdev->csk_send_abort_req = send_abort_req; | ||
1039 | cdev->csk_send_close_req = send_close_req; | ||
1040 | cdev->csk_send_rx_credits = send_rx_credits; | ||
1041 | cdev->csk_alloc_cpls = alloc_cpls; | ||
1042 | cdev->csk_init_act_open = init_act_open; | ||
1043 | |||
1044 | pr_info("cdev 0x%p, offload up, added.\n", cdev); | ||
1045 | return 0; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * functions to program the pagepod in h/w | ||
1050 | */ | ||
1051 | static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) | ||
1052 | { | ||
1053 | struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; | ||
1054 | |||
1055 | memset(req, 0, sizeof(*req)); | ||
1056 | |||
1057 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); | ||
1058 | req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | | ||
1059 | V_ULPTX_CMD(ULP_MEM_WRITE)); | ||
1060 | req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | | ||
1061 | V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); | ||
1062 | } | ||
1063 | |||
/*
 * Program @npods pagepods for a DDP mapping into adapter memory, one
 * ULP_MEM_WRITE request per pagepod, using the skbs pre-allocated in
 * ddp->gl_skb[].
 */
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	/* adapter memory address of the first pagepod */
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
		csk, idx, npods, gl);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		/* fill in the pagepod body right after the WR header */
		cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
					sizeof(struct ulp_mem_io)),
				   hdr, gl, i * PPOD_PAGES_MAX);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
	return 0;
}
1092 | |||
/*
 * Clear @npods pagepods for tag @tag by writing zeroed pagepods to the
 * adapter.  Consumes the extra skb reference taken in ddp_set_map();
 * the gl_skb[] slot is NULLed so the skb is not reused.
 */
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			unsigned int idx, unsigned int npods)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
		cdev, idx, npods, tag);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		/* slot was never programmed (or already cleared): skip it */
		if (!skb) {
			pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				tag, idx, i, npods);
			continue;
		}
		ddp->gl_skb[idx] = NULL;
		/* zero the pagepod body, keep the WR header */
		memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
}
1120 | |||
1121 | static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt) | ||
1122 | { | ||
1123 | int i; | ||
1124 | |||
1125 | log_debug(1 << CXGBI_DBG_DDP, | ||
1126 | "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); | ||
1127 | |||
1128 | for (i = 0; i < cnt; i++, idx++) | ||
1129 | if (ddp->gl_skb[idx]) { | ||
1130 | kfree_skb(ddp->gl_skb[idx]); | ||
1131 | ddp->gl_skb[idx] = NULL; | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, | ||
1136 | int cnt, gfp_t gfp) | ||
1137 | { | ||
1138 | int i; | ||
1139 | |||
1140 | log_debug(1 << CXGBI_DBG_DDP, | ||
1141 | "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); | ||
1142 | |||
1143 | for (i = 0; i < cnt; i++) { | ||
1144 | struct sk_buff *skb = alloc_cpl(sizeof(struct ulp_mem_io) + | ||
1145 | PPOD_SIZE, 0, gfp); | ||
1146 | if (skb) { | ||
1147 | ddp->gl_skb[idx + i] = skb; | ||
1148 | } else { | ||
1149 | ddp_free_gl_skb(ddp, idx, i); | ||
1150 | return -ENOMEM; | ||
1151 | } | ||
1152 | } | ||
1153 | return 0; | ||
1154 | } | ||
1155 | |||
/*
 * Program the DDP page-size index for connection @tid via a
 * CPL_SET_TCB_FIELD on TCB word 31 (bits 28-31).  @reply requests a
 * completion from the h/w.  Returns 0 or -ENOMEM.
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx, bool reply)
{
	struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	/* out-of-range page index falls back to 0 */
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1183 | |||
/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * Program the iscsi digest settings for connection @tid via a
 * CPL_SET_TCB_FIELD on TCB word 31 (bits 24-27).  Returns 0 or -ENOMEM.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
			     int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	/* bit 0 = header crc, bit 1 = data crc */
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1220 | |||
1221 | /** | ||
1222 | * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource | ||
1223 | * @cdev: cxgb3i adapter | ||
1224 | * release all the resource held by the ddp pagepod manager for a given | ||
1225 | * adapter if needed | ||
1226 | */ | ||
1227 | |||
1228 | static void t3_ddp_cleanup(struct cxgbi_device *cdev) | ||
1229 | { | ||
1230 | struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; | ||
1231 | |||
1232 | if (cxgbi_ddp_cleanup(cdev)) { | ||
1233 | pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev); | ||
1234 | tdev->ulp_iscsi = NULL; | ||
1235 | } | ||
1236 | } | ||
1237 | |||
/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 * Initialize the ddp pagepod manager for a given adapter.  The manager
 * is shared per t3cdev: if one already exists, take an extra reference
 * and return -EALREADY.
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
	struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int pgsz_factor[4];
	int err;

	/* a previous cdev on this t3cdev already set up the ddp manager */
	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			tdev, tdev->ulp_iscsi);
		cdev->ddp = ddp;
		return -EALREADY;
	}

	/* query the adapter's iscsi memory window and pdu limits */
	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		pr_err("%s, failed to get iscsi param err=%d.\n",
			tdev->name, err);
		return err;
	}

	err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
			uinfo.max_txsz, uinfo.max_rxsz);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	/* push the tag mask, page sizes and trimmed memory window back */
	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
			tdev->name, err);
		cxgbi_ddp_cleanup(cdev);
		return err;
	}
	tdev->ulp_iscsi = ddp;

	/* wire the t3-specific ddp operations into the generic layer */
	cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
	cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		"%u/%u.\n",
		tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		ddp->max_rxsz, uinfo.max_rxsz);
	return 0;
}
1300 | |||
1301 | static void cxgb3i_dev_close(struct t3cdev *t3dev) | ||
1302 | { | ||
1303 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | ||
1304 | |||
1305 | if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { | ||
1306 | pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0); | ||
1307 | return; | ||
1308 | } | ||
1309 | |||
1310 | cxgbi_device_unregister(cdev); | ||
1311 | } | ||
1312 | |||
1313 | /** | ||
1314 | * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings | ||
1315 | * @t3dev: t3cdev adapter | ||
1316 | */ | ||
1317 | static void cxgb3i_dev_open(struct t3cdev *t3dev) | ||
1318 | { | ||
1319 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | ||
1320 | struct adapter *adapter = tdev2adap(t3dev); | ||
1321 | int i, err; | ||
1322 | |||
1323 | if (cdev) { | ||
1324 | pr_info("0x%p, updating.\n", cdev); | ||
1325 | return; | ||
1326 | } | ||
1327 | |||
1328 | cdev = cxgbi_device_register(0, adapter->params.nports); | ||
1329 | if (!cdev) { | ||
1330 | pr_warn("device 0x%p register failed.\n", t3dev); | ||
1331 | return; | ||
1332 | } | ||
1333 | |||
1334 | cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET; | ||
1335 | cdev->lldev = t3dev; | ||
1336 | cdev->pdev = adapter->pdev; | ||
1337 | cdev->ports = adapter->port; | ||
1338 | cdev->nports = adapter->params.nports; | ||
1339 | cdev->mtus = adapter->params.mtus; | ||
1340 | cdev->nmtus = NMTUS; | ||
1341 | cdev->snd_win = cxgb3i_snd_win; | ||
1342 | cdev->rcv_win = cxgb3i_rcv_win; | ||
1343 | cdev->rx_credit_thres = cxgb3i_rx_credit_thres; | ||
1344 | cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; | ||
1345 | cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); | ||
1346 | cdev->dev_ddp_cleanup = t3_ddp_cleanup; | ||
1347 | cdev->itp = &cxgb3i_iscsi_transport; | ||
1348 | |||
1349 | err = cxgb3i_ddp_init(cdev); | ||
1350 | if (err) { | ||
1351 | pr_info("0x%p ddp init failed\n", cdev); | ||
1352 | goto err_out; | ||
1353 | } | ||
1354 | |||
1355 | err = cxgb3i_ofld_init(cdev); | ||
1356 | if (err) { | ||
1357 | pr_info("0x%p offload init failed\n", cdev); | ||
1358 | goto err_out; | ||
1359 | } | ||
1360 | |||
1361 | err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN, | ||
1362 | &cxgb3i_host_template, cxgb3i_stt); | ||
1363 | if (err) | ||
1364 | goto err_out; | ||
1365 | |||
1366 | for (i = 0; i < cdev->nports; i++) | ||
1367 | cdev->hbas[i]->ipv4addr = | ||
1368 | cxgb3i_get_private_ipv4addr(cdev->ports[i]); | ||
1369 | |||
1370 | pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", | ||
1371 | cdev, cdev ? cdev->flags : 0, t3dev, err); | ||
1372 | return; | ||
1373 | |||
1374 | err_out: | ||
1375 | cxgbi_device_unregister(cdev); | ||
1376 | } | ||
1377 | |||
1378 | static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) | ||
1379 | { | ||
1380 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | ||
1381 | |||
1382 | log_debug(1 << CXGBI_DBG_TOE, | ||
1383 | "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n", | ||
1384 | t3dev, cdev, event, port); | ||
1385 | if (!cdev) | ||
1386 | return; | ||
1387 | |||
1388 | switch (event) { | ||
1389 | case OFFLOAD_STATUS_DOWN: | ||
1390 | cdev->flags |= CXGBI_FLAG_ADAPTER_RESET; | ||
1391 | break; | ||
1392 | case OFFLOAD_STATUS_UP: | ||
1393 | cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET; | ||
1394 | break; | ||
1395 | } | ||
1396 | } | ||
1397 | |||
1398 | /** | ||
1399 | * cxgb3i_init_module - module init entry point | ||
1400 | * | ||
1401 | * initialize any driver wide global data structures and register itself | ||
1402 | * with the cxgb3 module | ||
1403 | */ | ||
1404 | static int __init cxgb3i_init_module(void) | ||
1405 | { | ||
1406 | int rc; | ||
1407 | |||
1408 | printk(KERN_INFO "%s", version); | ||
1409 | |||
1410 | rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt); | ||
1411 | if (rc < 0) | ||
1412 | return rc; | ||
1413 | |||
1414 | cxgb3_register_client(&t3_client); | ||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * Teardown in reverse of init: stop receiving adapter notifications,
 * release every registered t3 device, then unregister the iscsi
 * transport.
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}
1430 | |||
1431 | module_init(cxgb3i_init_module); | ||
1432 | module_exit(cxgb3i_exit_module); | ||
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h new file mode 100644 index 000000000000..5f5e3394b594 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * cxgb3i.h: Chelsio S3xx iSCSI driver. | ||
3 | * | ||
4 | * Copyright (c) 2008 Chelsio Communications, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation. | ||
9 | * | ||
10 | * Written by: Karen Xie (kxie@chelsio.com) | ||
11 | */ | ||
12 | |||
13 | #ifndef __CXGB3I_H__ | ||
14 | #define __CXGB3I_H__ | ||
15 | |||
16 | #define CXGB3I_SCSI_HOST_QDEPTH 1024 | ||
17 | #define CXGB3I_MAX_LUN 512 | ||
18 | #define ISCSI_PDU_NONPAYLOAD_MAX \ | ||
19 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE) | ||
20 | |||
21 | /*for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ | ||
22 | #define CXGB3I_TX_HEADER_LEN \ | ||
23 | (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) | ||
24 | |||
25 | extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; | ||
26 | |||
27 | #define cxgb3i_get_private_ipv4addr(ndev) \ | ||
28 | (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) | ||
29 | #define cxgb3i_set_private_ipv4addr(ndev, addr) \ | ||
30 | (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr | ||
31 | |||
/* CPL_ISCSI_HDR layout without the leading RSS header; used to parse the
 * inline (non-DDP) payload header inside a coalesced rx skb. */
struct cpl_iscsi_hdr_norss {
	union opcode_tid ot;
	u16 pdu_len_ddp;
	u16 len;		/* inline payload length */
	u32 seq;
	u16 urg;
	u8 rsvd;
	u8 status;
};

/* CPL_RX_DATA_DDP layout without the leading RSS header; rides at the
 * tail of a coalesced rx skb and carries the per-pdu DDP status. */
struct cpl_rx_data_ddp_norss {
	union opcode_tid ot;
	u16 urg;
	u16 len;		/* pdu length */
	u32 seq;
	u32 nxt_seq;
	u32 ulp_crc;		/* h/w computed data digest */
	u32 ddp_status;		/* CPL_RX_DDP_STATUS_* bits */
};
51 | #endif | ||