author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-15 21:58:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-15 21:58:04 -0400
commit    89a93f2f4834f8c126e8d9dd6b368d0b9e21ec3d (patch)
tree      e731456fec0cab1225ad3e806dc8d3efefa0a78b /drivers/infiniband/ulp
parent    260eddf4391f162a69d1d163729249635fa7a78f (diff)
parent    fe9233fb6914a0eb20166c967e3020f7f0fba2c9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (102 commits)
[SCSI] scsi_dh: fix kconfig related build errors
[SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of container_of
[SCSI] scsi_cmnd.h: remove double inclusion of linux/blkdev.h
[SCSI] make struct scsi_{host,target}_type static
[SCSI] fix locking in host use of blk_plug_device()
[SCSI] zfcp: Cleanup external header file
[SCSI] zfcp: Cleanup code in zfcp_erp.c
[SCSI] zfcp: zfcp_fsf cleanup.
[SCSI] zfcp: consolidate sysfs things into one file.
[SCSI] zfcp: Cleanup of code in zfcp_aux.c
[SCSI] zfcp: Cleanup of code in zfcp_scsi.c
[SCSI] zfcp: Move status accessors from zfcp to SCSI include file.
[SCSI] zfcp: Small QDIO cleanups
[SCSI] zfcp: Adapter reopen for large number of unsolicited status
[SCSI] zfcp: Fix error checking for ELS ADISC requests
[SCSI] zfcp: wait until adapter is finished with ERP during auto-port
[SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver
[SCSI] sg: Add target reset support
[SCSI] lib: Add support for the T10 (SCSI) Data Integrity Field CRC
[SCSI] sd: Move scsi_disk() accessor function to sd.h
...
Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/iser/iscsi_iser.c     | 356
 drivers/infiniband/ulp/iser/iscsi_iser.h     |  44
 drivers/infiniband/ulp/iser/iser_initiator.c | 209
 drivers/infiniband/ulp/iser/iser_memory.c    |  77
 drivers/infiniband/ulp/iser/iser_verbs.c     |  28
5 files changed, 380 insertions, 334 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 356fac6d105a..5a1cf2580e16 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -71,6 +71,10 @@
 
 #include "iscsi_iser.h"
 
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
@@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 		struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
 {
 	int rc = 0;
-	uint32_t ret_itt;
 	int datalen;
 	int ahslen;
 
@@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 	/* read AHS */
 	ahslen = hdr->hlength * 4;
 
-	/* verify itt (itt encoding: age+cid+itt) */
-	rc = iscsi_verify_itt(conn, hdr, &ret_itt);
-
-	if (!rc)
-		rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
-
+	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
 	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
 		goto error;
 
@@ -123,25 +121,33 @@ error:
 
 
 /**
- * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_iser_task_init - Initialize task
+ * @task: iscsi task
  *
- **/
+ * Initialize the task for the scsi command or mgmt command.
+ */
 static int
-iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_init(struct iscsi_task *task)
 {
-	struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
+
+	/* mgmt task */
+	if (!task->sc) {
+		iser_task->desc.data = task->data;
+		return 0;
+	}
 
-	iser_ctask->command_sent = 0;
-	iser_ctask->iser_conn = iser_conn;
-	iser_ctask_rdma_init(iser_ctask);
+	iser_task->command_sent = 0;
+	iser_task->iser_conn = iser_conn;
+	iser_task_rdma_init(iser_task);
 	return 0;
 }
 
 /**
- * iscsi_mtask_xmit - xmit management(immediate) task
+ * iscsi_iser_mtask_xmit - xmit management(immediate) task
  * @conn: iscsi connection
- * @mtask: task management task
+ * @task: task management task
  *
  * Notes:
  *	The function can return -EAGAIN in which case caller must
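
The branch on task->sc above works because libiscsi now allocates SCSI and management tasks from one pool, with the transport's per-task struct living in the task's dd_data. A minimal stand-alone C model of that dd_data convention (the struct names and fields here are illustrative, not the kernel's):

	#include <stdio.h>
	#include <stdlib.h>

	struct task {            /* stand-in for struct iscsi_task */
		int itt;
		void *dd_data;   /* points at the transport-private area */
	};

	struct iser_priv {       /* stand-in for struct iscsi_iser_task */
		int command_sent;
	};

	/* allocate a task with dd_size extra bytes, like iscsi_session_setup() */
	static struct task *task_alloc(size_t dd_size)
	{
		struct task *t = calloc(1, sizeof(*t) + dd_size);
		if (t)
			t->dd_data = t + 1; /* private area lives right after the task */
		return t;
	}

	int main(void)
	{
		struct task *t = task_alloc(sizeof(struct iser_priv));
		struct iser_priv *p = t->dd_data;
		p->command_sent = 1;
		printf("task %p dd_data %p\n", (void *)t, t->dd_data);
		free(t);
		return 0;
	}
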
@@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
  *
  **/
 static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask)
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	int error = 0;
 
-	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
 
-	error = iser_send_control(conn, mtask);
+	error = iser_send_control(conn, task);
 
-	/* since iser xmits control with zero copy, mtasks can not be recycled
+	/* since iser xmits control with zero copy, tasks can not be recycled
 	 * right after sending them.
 	 * The recycling scheme is based on whether a response is expected
-	 * - if yes, the mtask is recycled at iscsi_complete_pdu
-	 * - if no, the mtask is recycled at iser_snd_completion
+	 * - if yes, the task is recycled at iscsi_complete_pdu
+	 * - if no, the task is recycled at iser_snd_completion
 	 */
 	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
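
The recycling comment above describes two mutually exclusive completion paths. A toy model of that decision, assuming only that a control PDU is freed by exactly one of the two events (an illustration, not the driver's code):

	#include <stdbool.h>
	#include <stdio.h>

	/* toy model: a control task is recycled by whichever event owns it */
	struct ctrl_task {
		bool expects_response;
	};

	static void on_send_completion(struct ctrl_task *t)
	{
		if (!t->expects_response)
			printf("recycle at send completion\n");
	}

	static void on_response_pdu(struct ctrl_task *t)
	{
		if (t->expects_response)
			printf("recycle at response completion\n");
	}

	int main(void)
	{
		struct ctrl_task nop_out = { .expects_response = true };
		struct ctrl_task reply  = { .expects_response = false };

		on_send_completion(&nop_out);  /* nothing: response pending */
		on_response_pdu(&nop_out);     /* recycled here */
		on_send_completion(&reply);    /* recycled here */
		return 0;
	}
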
@@ -172,97 +177,86 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
 }
 
 static int
-iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
-				 struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+				struct iscsi_task *task)
 {
 	struct iscsi_data hdr;
 	int error = 0;
 
 	/* Send data-out PDUs while there's still unsolicited data to send */
-	while (ctask->unsol_count > 0) {
-		iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+	while (task->unsol_count > 0) {
+		iscsi_prep_unsolicit_data_pdu(task, &hdr);
 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-			   hdr.itt, ctask->data_count);
+			   hdr.itt, task->data_count);
 
 		/* the buffer description has been passed with the command */
 		/* Send the command */
-		error = iser_send_data_out(conn, ctask, &hdr);
+		error = iser_send_data_out(conn, task, &hdr);
 		if (error) {
-			ctask->unsol_datasn--;
-			goto iscsi_iser_ctask_xmit_unsol_data_exit;
+			task->unsol_datasn--;
+			goto iscsi_iser_task_xmit_unsol_data_exit;
 		}
-		ctask->unsol_count -= ctask->data_count;
+		task->unsol_count -= task->data_count;
 		debug_scsi("Need to send %d more as data-out PDUs\n",
-			   ctask->unsol_count);
+			   task->unsol_count);
 	}
 
-iscsi_iser_ctask_xmit_unsol_data_exit:
+iscsi_iser_task_xmit_unsol_data_exit:
 	return error;
 }
 
 static int
-iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	int error = 0;
 
-	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-		BUG_ON(scsi_bufflen(ctask->sc) == 0);
+	if (!task->sc)
+		return iscsi_iser_mtask_xmit(conn, task);
+
+	if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+		BUG_ON(scsi_bufflen(task->sc) == 0);
 
 		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-			   ctask->itt, scsi_bufflen(ctask->sc),
-			   ctask->imm_count, ctask->unsol_count);
+			   task->itt, scsi_bufflen(task->sc),
+			   task->imm_count, task->unsol_count);
 	}
 
-	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
-		   conn->id, ctask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n",
+		   conn->id, task->itt);
 
 	/* Send the cmd PDU */
-	if (!iser_ctask->command_sent) {
-		error = iser_send_command(conn, ctask);
+	if (!iser_task->command_sent) {
+		error = iser_send_command(conn, task);
 		if (error)
-			goto iscsi_iser_ctask_xmit_exit;
-		iser_ctask->command_sent = 1;
+			goto iscsi_iser_task_xmit_exit;
+		iser_task->command_sent = 1;
 	}
 
 	/* Send unsolicited data-out PDU(s) if necessary */
-	if (ctask->unsol_count)
-		error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+	if (task->unsol_count)
+		error = iscsi_iser_task_xmit_unsol_data(conn, task);
 
-iscsi_iser_ctask_xmit_exit:
+iscsi_iser_task_xmit_exit:
 	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 	return error;
 }
 
 static void
-iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
-		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-		iser_ctask_rdma_finalize(iser_ctask);
-	}
-}
-
-static struct iser_conn *
-iscsi_iser_ib_conn_lookup(__u64 ep_handle)
-{
-	struct iser_conn *ib_conn;
-	struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+	/* mgmt tasks do not need special cleanup */
+	if (!task->sc)
+		return;
 
-	mutex_lock(&ig.connlist_mutex);
-	list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
-		if (ib_conn == uib_conn) {
-			mutex_unlock(&ig.connlist_mutex);
-			return ib_conn;
-		}
+	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+		iser_task->status = ISER_TASK_STATUS_COMPLETED;
+		iser_task_rdma_finalize(iser_task);
 	}
-	mutex_unlock(&ig.connlist_mutex);
-	iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
-	return NULL;
 }
 
 static struct iscsi_cls_conn *
@@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_iser_conn *iser_conn;
 
-	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
@@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	 */
 	conn->max_recv_dlength = 128;
 
-	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
-	if (!iser_conn)
-		goto conn_alloc_fail;
-
-	/* currently this is the only field which need to be initiated */
-	rwlock_init(&iser_conn->lock);
-
+	iser_conn = conn->dd_data;
 	conn->dd_data = iser_conn;
 	iser_conn->iscsi_conn = conn;
 
 	return cls_conn;
-
-conn_alloc_fail:
-	iscsi_conn_teardown(cls_conn);
-	return NULL;
 }
 
 static void
@@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
 
 	iscsi_conn_teardown(cls_conn);
-	if (iser_conn->ib_conn)
-		iser_conn->ib_conn->iser_conn = NULL;
-	kfree(iser_conn);
+	/*
+	 * Userspace will normally call the stop callback and
+	 * already have freed the ib_conn, but if it goofed up then
+	 * we free it here.
+	 */
+	if (ib_conn) {
+		ib_conn->iser_conn = NULL;
+		iser_conn_put(ib_conn);
+	}
 }
 
 static int
@@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 	int error;
 
 	error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 
 	/* the transport ep handle comes from user space so it must be
 	 * verified against the global ib connections list */
-	ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
-	if (!ib_conn) {
+	ep = iscsi_lookup_endpoint(transport_eph);
+	if (!ep) {
 		iser_err("can't bind eph %llx\n",
 			 (unsigned long long)transport_eph);
 		return -EINVAL;
 	}
+	ib_conn = ep->dd_data;
+
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
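
iscsi_lookup_endpoint() validates an opaque handle from userspace instead of trusting a raw kernel pointer, which is what the removed iscsi_iser_ib_conn_lookup() effectively did. A rough stand-alone sketch of the idea (the registry here is hypothetical; the real one lives in the iscsi transport class):

	#include <stdint.h>
	#include <stdio.h>

	/* toy registry: handle -> endpoint validation, replacing the old */
	/* "walk a list and compare raw pointer values" scheme            */
	struct endpoint {
		uint64_t handle;    /* opaque id handed to userspace */
		void *dd_data;      /* transport-private state (the ib_conn) */
		int in_use;
	};

	static struct endpoint table[4];

	static struct endpoint *create_endpoint(void *priv)
	{
		for (int i = 0; i < 4; i++)
			if (!table[i].in_use) {
				table[i].in_use = 1;
				table[i].handle = (uint64_t)(i + 1);
				table[i].dd_data = priv;
				return &table[i];
			}
		return NULL;
	}

	static struct endpoint *lookup_endpoint(uint64_t handle)
	{
		for (int i = 0; i < 4; i++)
			if (table[i].in_use && table[i].handle == handle)
				return &table[i];
		return NULL; /* untrusted handle from userspace: may be bogus */
	}

	int main(void)
	{
		int ib_conn_stub = 42;
		struct endpoint *ep = create_endpoint(&ib_conn_stub);
		printf("lookup(%llu) -> %p\n", (unsigned long long)ep->handle,
		       (void *)lookup_endpoint(ep->handle));
		printf("lookup(999) -> %p\n", (void *)lookup_endpoint(999));
		return 0;
	}
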
@@ -341,10 +335,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn = ib_conn;
+	iser_conn_get(ib_conn);
+	return 0;
+}
 
-	conn->recv_lock = &iser_conn->lock;
+static void
+iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
 
-	return 0;
+	/*
+	 * Userspace may have goofed up and not bound the connection or
+	 * might have only partially setup the connection.
+	 */
+	if (ib_conn) {
+		iscsi_conn_stop(cls_conn, flag);
+		/*
+		 * There is no unbind event so the stop callback
+		 * must release the ref from the bind.
+		 */
+		iser_conn_put(ib_conn);
+	}
+	iser_conn->ib_conn = NULL;
 }
 
 static int
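
A compact model of the iser_conn_get()/iser_conn_put() lifetime introduced here, under the scheme the comments above describe: ep_connect creates the object with one reference, bind takes a second, and stop/disconnect/destroy may then drop them in any order (a sketch, not the kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct conn {
		int refcount;
	};

	static struct conn *conn_create(void)
	{
		struct conn *c = calloc(1, sizeof(*c));
		c->refcount = 1;        /* reference held by the endpoint */
		return c;
	}

	static void conn_get(struct conn *c) { c->refcount++; }

	static void conn_put(struct conn *c)
	{
		if (--c->refcount == 0) {
			printf("freeing conn\n");
			free(c);
		}
	}

	int main(void)
	{
		struct conn *c = conn_create(); /* ep_connect */
		conn_get(c);                    /* bind_conn */
		conn_put(c);                    /* stop_conn drops the bind ref */
		conn_put(c);                    /* disconnect/destroy drops the last ref */
		return 0;
	}
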
@@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 	return iscsi_conn_start(cls_conn);
 }
 
-static struct iscsi_transport iscsi_iser_transport;
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+	iscsi_host_remove(shost);
+	iscsi_host_free(shost);
+}
 
 static struct iscsi_cls_session *
-iscsi_iser_session_create(struct iscsi_transport *iscsit,
-			  struct scsi_transport_template *scsit,
-			  uint16_t cmds_max, uint16_t qdepth,
-			  uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
+			  uint16_t cmds_max, uint16_t qdepth,
+			  uint32_t initial_cmdsn, uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
+	struct Scsi_Host *shost;
 	int i;
-	uint32_t hn;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_mgmt_task *mtask;
-	struct iscsi_iser_cmd_task *iser_ctask;
-	struct iser_desc *desc;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
+	struct iser_conn *ib_conn;
+
+	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+	if (!shost)
+		return NULL;
+	shost->transportt = iscsi_iser_scsi_transport;
+	shost->max_lun = iscsi_max_lun;
+	shost->max_id = 0;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	/*
+	 * older userspace tools (before 2.0-870) did not pass us
+	 * the leading conn's ep so this will be NULL;
+	 */
+	if (ep)
+		ib_conn = ep->dd_data;
+
+	if (iscsi_host_add(shost,
+			   ep ? ib_conn->device->ib_device->dma_device : NULL))
+		goto free_host;
+	*hostno = shost->host_no;
 
 	/*
 	 * we do not support setting can_queue cmd_per_lun from userspace yet
 	 * because we preallocate so many resources
 	 */
-	cls_session = iscsi_session_setup(iscsit, scsit,
+	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
 					  ISCSI_DEF_XMIT_CMDS_MAX,
-					  ISCSI_MAX_CMD_PER_LUN,
-					  sizeof(struct iscsi_iser_cmd_task),
-					  sizeof(struct iser_desc),
-					  initial_cmdsn, &hn);
+					  sizeof(struct iscsi_iser_task),
+					  initial_cmdsn, 0);
 	if (!cls_session)
-		return NULL;
-
-	*hostno = hn;
-	session = class_to_transport_session(cls_session);
+		goto remove_host;
+	session = cls_session->dd_data;
 
+	shost->can_queue = session->scsi_cmds_max;
 	/* libiscsi setup itts, data and pool so just set desc fields */
 	for (i = 0; i < session->cmds_max; i++) {
-		ctask = session->cmds[i];
-		iser_ctask = ctask->dd_data;
-		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
-		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
-	}
-
-	for (i = 0; i < session->mgmtpool_max; i++) {
-		mtask = session->mgmt_cmds[i];
-		desc = mtask->dd_data;
-		mtask->hdr = &desc->iscsi_header;
-		desc->data = mtask->data;
+		task = session->cmds[i];
+		iser_task = task->dd_data;
+		task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+		task->hdr_max = sizeof(iser_task->desc.iscsi_header);
 	}
-
 	return cls_session;
+
+remove_host:
+	iscsi_host_remove(shost);
+free_host:
+	iscsi_host_free(shost);
+	return NULL;
 }
 
 static int
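
The new error unwinding in session_create follows the usual goto-label pattern: each label undoes only the steps that already succeeded. A self-contained sketch of that shape (all functions here are stubs, not the kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	static void *host_alloc(void) { return malloc(1); }
	static int host_add(void *h) { (void)h; return 0; }           /* 0 = ok */
	static void *session_setup(void *h) { (void)h; return NULL; } /* fails here */
	static void host_remove(void *h) { (void)h; printf("host removed\n"); }
	static void host_free(void *h) { free(h); printf("host freed\n"); }

	static void *create(void)
	{
		void *shost, *session;

		shost = host_alloc();
		if (!shost)
			return NULL;
		if (host_add(shost))
			goto free_host;
		session = session_setup(shost);
		if (!session)
			goto remove_host;   /* undo add, then fall into free */
		return session;

	remove_host:
		host_remove(shost);
	free_host:
		host_free(shost);
		return NULL;
	}

	int main(void)
	{
		printf("create -> %p\n", create());
		return 0;
	}
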
@@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 	stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
-static int
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
-		      __u64 *ep_handle)
+static struct iscsi_endpoint *
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 
-	err = iser_conn_init(&ib_conn);
-	if (err)
-		goto out;
+	ep = iscsi_create_endpoint(sizeof(*ib_conn));
+	if (!ep)
+		return ERR_PTR(-ENOMEM);
 
-	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
-	if (!err)
-		*ep_handle = (__u64)(unsigned long)ib_conn;
+	ib_conn = ep->dd_data;
+	ib_conn->ep = ep;
+	iser_conn_init(ib_conn);
 
-out:
-	return err;
+	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+			   non_blocking);
+	if (err) {
+		iscsi_destroy_endpoint(ep);
+		return ERR_PTR(err);
+	}
+	return ep;
 }
 
 static int
-iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-	struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+	struct iser_conn *ib_conn;
 	int rc;
 
-	if (!ib_conn)
-		return -EINVAL;
-
+	ib_conn = ep->dd_data;
 	rc = wait_event_interruptible_timeout(ib_conn->wait,
 					      ib_conn->state == ISER_CONN_UP,
 					      msecs_to_jiffies(timeout_ms));
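
ERR_PTR()/IS_ERR()/PTR_ERR() let the new ep_connect return either a valid endpoint or a precise errno through a single pointer return value. A user-space rendition of that kernel convention:

	#include <errno.h>
	#include <stdio.h>

	/* an errno is smuggled into an otherwise-invalid pointer value */
	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *connect_ep(int fail)
	{
		static int real_object;
		if (fail)
			return ERR_PTR(-ENOMEM);
		return &real_object;
	}

	int main(void)
	{
		void *ep = connect_ep(1);
		if (IS_ERR(ep))
			printf("connect failed: %ld\n", PTR_ERR(ep)); /* -12 */
		ep = connect_ep(0);
		if (!IS_ERR(ep))
			printf("got endpoint %p\n", ep);
		return 0;
	}
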
@@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
 }
 
 static void
-iscsi_iser_ep_disconnect(__u64 ep_handle)
+iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
 	struct iser_conn *ib_conn;
 
-	ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
-	if (!ib_conn)
-		return;
+	ib_conn = ep->dd_data;
+	if (ib_conn->iser_conn)
+		/*
+		 * Must suspend xmit path if the ep is bound to the
+		 * iscsi_conn, so we know we are not accessing the ib_conn
+		 * when we free it.
+		 *
+		 * This may not be bound if the ep poll failed.
+		 */
+		iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+
 
 	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
@@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.name			= "iSCSI Initiator over iSER, v." DRV_VER,
 	.queuecommand		= iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
-	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
 	.sg_tablesize		= ISCSI_ISER_SG_TABLESIZE,
 	.max_sectors		= 1024,
 	.cmd_per_lun		= ISCSI_MAX_CMD_PER_LUN,
@@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_NETDEV_NAME |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.host_template		= &iscsi_iser_sht,
-	.conndata_size		= sizeof(struct iscsi_conn),
-	.max_lun		= ISCSI_ISER_MAX_LUN,
-	.max_cmd_len		= ISCSI_ISER_MAX_CMD_LEN,
 	/* session management */
 	.create_session		= iscsi_iser_session_create,
-	.destroy_session	= iscsi_session_teardown,
+	.destroy_session	= iscsi_iser_session_destroy,
 	/* connection management */
 	.create_conn		= iscsi_iser_conn_create,
 	.bind_conn		= iscsi_iser_conn_bind,
@@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.get_conn_param		= iscsi_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn		= iscsi_iser_conn_start,
-	.stop_conn		= iscsi_conn_stop,
+	.stop_conn		= iscsi_iser_conn_stop,
 	/* iscsi host params */
 	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
 	.get_stats		= iscsi_iser_conn_get_stats,
-	.init_cmd_task		= iscsi_iser_cmd_init,
-	.xmit_cmd_task		= iscsi_iser_ctask_xmit,
-	.xmit_mgmt_task		= iscsi_iser_mtask_xmit,
-	.cleanup_cmd_task	= iscsi_iser_cleanup_ctask,
+	.init_task		= iscsi_iser_task_init,
+	.xmit_task		= iscsi_iser_task_xmit,
+	.cleanup_task		= iscsi_iser_cleanup_task,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 
@@ -630,8 +670,6 @@ static int __init iser_init(void)
 		return -EINVAL;
 	}
 
-	iscsi_iser_transport.max_lun = iscsi_max_lun;
-
 	memset(&ig, 0, sizeof(struct iser_global));
 
 	ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -647,7 +685,9 @@ static int __init iser_init(void)
 	mutex_init(&ig.connlist_mutex);
 	INIT_LIST_HEAD(&ig.connlist);
 
-	if (!iscsi_register_transport(&iscsi_iser_transport)) {
+	iscsi_iser_scsi_transport = iscsi_register_transport(
+							&iscsi_iser_transport);
+	if (!iscsi_iser_scsi_transport) {
 		iser_err("iscsi_register_transport failed\n");
 		err = -EINVAL;
 		goto register_transport_failure;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0e10703cf59e..81a82628a5f1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -94,7 +94,6 @@
 					/* support upto 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE		(0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN		256
-#define ISCSI_ISER_MAX_CMD_LEN		16
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
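
For reference, the ISCSI_ISER_SG_TABLESIZE arithmetic spelled out, assuming SHIFT_4K is 12 (the 4KB page shift) as defined elsewhere in this driver:

	#include <stdio.h>

	/* 0x80000 bytes = 512KB; shifting right by 12 divides by the 4KB  */
	/* page size, giving 128 scatter-gather entries per RDMA           */
	#define SHIFT_4K	12
	#define SG_TABLESIZE	(0x80000 >> SHIFT_4K)

	int main(void)
	{
		printf("%d entries * %d bytes = %d bytes\n",
		       SG_TABLESIZE, 1 << SHIFT_4K,
		       SG_TABLESIZE * (1 << SHIFT_4K));
		/* prints: 128 entries * 4096 bytes = 524288 bytes */
		return 0;
	}
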
@@ -172,7 +171,8 @@ struct iser_data_buf {
 /* fwd declarations */
 struct iser_device;
 struct iscsi_iser_conn;
-struct iscsi_iser_cmd_task;
+struct iscsi_iser_task;
+struct iscsi_endpoint;
 
 struct iser_mem_reg {
 	u32 lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
 #define MAX_REGD_BUF_VECTOR_LEN	2
 
 struct iser_dto {
-	struct iscsi_iser_cmd_task *ctask;
+	struct iscsi_iser_task *task;
 	struct iser_conn *ib_conn;
 	int notify_enable;
 
@@ -240,7 +240,9 @@ struct iser_device {
 
 struct iser_conn {
 	struct iscsi_iser_conn	     *iser_conn; /* iser conn for upcalls  */
+	struct iscsi_endpoint	     *ep;
 	enum iser_ib_conn_state	     state;	 /* rdma connection state  */
+	atomic_t		     refcount;
 	spinlock_t		     lock;	 /* used for state changes */
 	struct iser_device	     *device;	 /* device context	   */
 	struct rdma_cm_id	     *cma_id;	 /* CMA ID		   */
@@ -259,11 +261,9 @@ struct iser_conn {
 struct iscsi_iser_conn {
 	struct iscsi_conn	     *iscsi_conn;/* ptr to iscsi conn */
 	struct iser_conn	     *ib_conn;	 /* iSER IB conn      */
-
-	rwlock_t		     lock;
 };
 
-struct iscsi_iser_cmd_task {
+struct iscsi_iser_task {
 	struct iser_desc	     desc;
 	struct iscsi_iser_conn	     *iser_conn;
 	enum iser_task_status	     status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
 /* allocate connection resources needed for rdma functionality */
 int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask);
+		      struct iscsi_task *task);
 
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask);
+		      struct iscsi_task *task);
 
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr);
 
 void iscsi_iser_recv(struct iscsi_conn *conn,
 		     struct iscsi_hdr *hdr,
		     char *rx_data,
 		     int rx_data_len);
 
-int  iser_conn_init(struct iser_conn **ib_conn);
+void iser_conn_init(struct iser_conn *ib_conn);
+
+void iser_conn_get(struct iser_conn *ib_conn);
+
+void iser_conn_put(struct iser_conn *ib_conn);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
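
The iser_conn_init() prototype change above reflects the move from callee-allocated to caller-embedded storage (the ib_conn now lives inside the endpoint's dd_data), which is also why init can no longer fail. A small model of the two styles:

	#include <stdio.h>
	#include <stdlib.h>

	struct conn { int state; };

	/* old style: allocate and return through a pointer-to-pointer */
	static int conn_init_old(struct conn **out)
	{
		*out = calloc(1, sizeof(**out));
		return *out ? 0 : -1;
	}

	/* new style: just initialize storage the caller already owns */
	static void conn_init_new(struct conn *c)
	{
		c->state = 0;
	}

	int main(void)
	{
		struct conn *heap;
		struct conn embedded;   /* lives inside a bigger allocation */

		if (conn_init_old(&heap) == 0)
			free(heap);
		conn_init_new(&embedded);
		printf("embedded state %d\n", embedded.state);
		return 0;
	}
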
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
 
 void iser_snd_completion(struct iser_desc *desc);
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_init(struct iscsi_iser_task *task);
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
 
 void iser_dto_buffs_release(struct iser_dto *dto);
 
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device,
 		     struct iser_regd_buf *regd_buf,
 		     enum dma_data_direction direction);
 
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
 				     enum iser_data_dir cmd_dir);
 
-int  iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
 		       enum iser_data_dir cmd_dir);
 
 int iser_connect(struct iser_conn *ib_conn,
@@ -355,10 +359,10 @@ int iser_post_send(struct iser_desc *tx_desc);
 int iser_conn_state_comp(struct iser_conn *ib_conn,
 			 enum iser_ib_conn_state comp);
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir);
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 31ad498bdc51..cdd283189047 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  iser_ctask->data[ISER_DIR_IN].data_len
+ *  iser_task->data[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
 				 unsigned int edtl)
 
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_in,
 				     ISER_DIR_IN,
 				     DMA_FROM_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: "
 			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_IN].data_len, edtl,
-			 ctask->itt, iser_ctask->iser_conn);
+			 iser_task->data[ISER_DIR_IN].data_len, edtl,
+			 task->itt, iser_task->iser_conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
 	}
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
 
 	hdr->flags    |= ISER_RSV;
 	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
 	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
 
 	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
-		 ctask->itt, regd_buf->reg.rkey,
+		 task->itt, regd_buf->reg.rkey,
 		 (unsigned long long)regd_buf->reg.va);
 
 	return 0;
@@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  ctask->data[ISER_DIR_OUT].data_len
+ *  task->data[ISER_DIR_OUT].data_len
  */
 static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int imm_sz,
 		       unsigned int unsol_sz,
 		       unsigned int edtl)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_dto *send_dto = &iser_ctask->desc.dto;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+	struct iser_dto *send_dto = &iser_task->desc.dto;
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_out,
 				     ISER_DIR_OUT,
 				     DMA_TO_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: %d, "
 			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_OUT].data_len,
-			 edtl, ctask->itt, ctask->conn);
+			 iser_task->data[ISER_DIR_OUT].data_len,
+			 edtl, task->itt, task->conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
 	}
 
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
 
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
@@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
-			 ctask->itt, regd_buf->reg.rkey,
+			 task->itt, regd_buf->reg.rkey,
 			 (unsigned long long)regd_buf->reg.va, unsol_sz);
 	}
 
 	if (imm_sz > 0) {
 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
-			 ctask->itt, imm_sz);
+			 task->itt, imm_sz);
 		iser_dto_add_regd_buff(send_dto,
 				       regd_buf,
 				       0,
@@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
 /**
  * iser_send_command - send command PDU
  */
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_dto *send_dto = NULL;
 	unsigned long edtl;
 	int err = 0;
 	struct iser_data_buf *data_buf;
 
-	struct iscsi_cmd *hdr = ctask->hdr;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct iscsi_cmd *hdr = task->hdr;
+	struct scsi_cmnd *sc = task->sc;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
 		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
 		return -EPERM;
 	}
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
 	/* build the tx desc regd header and add it to the tx desc dto */
-	iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
-	send_dto = &iser_ctask->desc.dto;
-	send_dto->ctask = iser_ctask;
-	iser_create_send_desc(iser_conn, &iser_ctask->desc);
+	iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+	send_dto = &iser_task->desc.dto;
+	send_dto->task = iser_task;
+	iser_create_send_desc(iser_conn, &iser_task->desc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
-		data_buf = &iser_ctask->data[ISER_DIR_IN];
+		data_buf = &iser_task->data[ISER_DIR_IN];
 	else
-		data_buf = &iser_ctask->data[ISER_DIR_OUT];
+		data_buf = &iser_task->data[ISER_DIR_OUT];
 
 	if (scsi_sg_count(sc)) { /* using a scatter list */
 		data_buf->buf = scsi_sglist(sc);
@@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn *conn,
 		data_buf->data_len = scsi_bufflen(sc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		err = iser_prepare_read_cmd(ctask, edtl);
+		err = iser_prepare_read_cmd(task, edtl);
 		if (err)
 			goto send_command_error;
 	}
 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
-		err = iser_prepare_write_cmd(ctask,
-					     ctask->imm_count,
-					     ctask->imm_count +
-					     ctask->unsol_count,
+		err = iser_prepare_write_cmd(task,
+					     task->imm_count,
+					     task->imm_count +
+					     task->unsol_count,
 					     edtl);
 		if (err)
 			goto send_command_error;
@@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn *conn,
 		goto send_command_error;
 	}
 
-	iser_ctask->status = ISER_TASK_STATUS_STARTED;
+	iser_task->status = ISER_TASK_STATUS_STARTED;
 
-	err = iser_post_send(&iser_ctask->desc);
+	err = iser_post_send(&iser_task->desc);
 	if (!err)
 		return 0;
 
 send_command_error:
 	iser_dto_buffs_release(send_dto);
-	iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
 	return err;
 }
 
 /**
  * iser_send_data_out - send data out PDU
 */
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_desc *tx_desc = NULL;
 	struct iser_dto *send_dto = NULL;
 	unsigned long buf_offset;
@@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	itt = (__force uint32_t)hdr->itt;
@@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	send_dto = &tx_desc->dto;
-	send_dto->ctask = iser_ctask;
+	send_dto->task = iser_task;
 	iser_create_send_desc(iser_conn, tx_desc);
 
 	iser_reg_single(iser_conn->ib_conn->device,
@@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* all data was registered for RDMA, we can use the lkey */
 	iser_dto_add_regd_buff(send_dto,
-			       &iser_ctask->rdma_regd[ISER_DIR_OUT],
+			       &iser_task->rdma_regd[ISER_DIR_OUT],
 			       buf_offset,
 			       data_seg_len);
 
-	if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Offset:%ld & DSL:%ld in Data-Out "
 			 "inconsistent with total len:%ld, itt:%d\n",
 			 buf_offset, data_seg_len,
-			 iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+			 iser_task->data[ISER_DIR_OUT].data_len, itt);
 		err = -EINVAL;
 		goto send_data_out_error;
 	}
@@ -468,10 +468,11 @@ send_data_out_error:
 }
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_mgmt_task *mtask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iser_desc *mdesc = mtask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_desc *mdesc = &iser_task->desc;
 	struct iser_dto *send_dto = NULL;
 	unsigned long data_seg_len;
 	int err = 0;
@@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn,mtask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
 	send_dto = &mdesc->dto;
-	send_dto->ctask = NULL;
+	send_dto->task = NULL;
 	iser_create_send_desc(iser_conn, mdesc);
 
 	device = iser_conn->ib_conn->device;
 
 	iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
 
-	data_seg_len = ntoh24(mtask->hdr->dlength);
+	data_seg_len = ntoh24(task->hdr->dlength);
 
 	if (data_seg_len > 0) {
 		regd_buf = &mdesc->data_regd_buf;
 		memset(regd_buf, 0, sizeof(struct iser_regd_buf));
 		regd_buf->device = device;
-		regd_buf->virt_addr = mtask->data;
-		regd_buf->data_size = mtask->data_count;
+		regd_buf->virt_addr = task->data;
+		regd_buf->data_size = task->data_count;
 		iser_reg_single(device, regd_buf,
 				DMA_TO_DEVICE);
 		iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -533,15 +534,13 @@ send_control_error:
 void iser_rcv_completion(struct iser_desc *rx_desc,
 			 unsigned long dto_xfer_len)
 {
 	struct iser_dto *dto = &rx_desc->dto;
 	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-	struct iscsi_session *session = conn->iscsi_conn->session;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_iser_cmd_task *iser_ctask;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
 	struct iscsi_hdr *hdr;
 	char *rx_data = NULL;
 	int rx_data_len = 0;
-	unsigned int itt;
 	unsigned char opcode;
 
 	hdr = &rx_desc->iscsi_header;
@@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc, | |||
557 | opcode = hdr->opcode & ISCSI_OPCODE_MASK; | 556 | opcode = hdr->opcode & ISCSI_OPCODE_MASK; |
558 | 557 | ||
559 | if (opcode == ISCSI_OP_SCSI_CMD_RSP) { | 558 | if (opcode == ISCSI_OP_SCSI_CMD_RSP) { |
560 | itt = get_itt(hdr->itt); /* mask out cid and age bits */ | 559 | spin_lock(&conn->iscsi_conn->session->lock); |
561 | if (!(itt < session->cmds_max)) | 560 | task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt); |
561 | if (task) | ||
562 | __iscsi_get_task(task); | ||
563 | spin_unlock(&conn->iscsi_conn->session->lock); | ||
564 | |||
565 | if (!task) | ||
562 | iser_err("itt can't be matched to task!!! " | 566 | iser_err("itt can't be matched to task!!! " |
563 | "conn %p opcode %d cmds_max %d itt %d\n", | 567 | "conn %p opcode %d itt %d\n", |
564 | conn->iscsi_conn,opcode,session->cmds_max,itt); | 568 | conn->iscsi_conn, opcode, hdr->itt); |
565 | /* use the mapping given with the cmds array indexed by itt */ | 569 | else { |
566 | ctask = (struct iscsi_cmd_task *)session->cmds[itt]; | 570 | iser_task = task->dd_data; |
567 | iser_ctask = ctask->dd_data; | 571 | iser_dbg("itt %d task %p\n",hdr->itt, task); |
568 | iser_dbg("itt %d ctask %p\n",itt,ctask); | 572 | iser_task->status = ISER_TASK_STATUS_COMPLETED; |
569 | iser_ctask->status = ISER_TASK_STATUS_COMPLETED; | 573 | iser_task_rdma_finalize(iser_task); |
570 | iser_ctask_rdma_finalize(iser_ctask); | 574 | iscsi_put_task(task); |
575 | } | ||
571 | } | 576 | } |
572 | |||
573 | iser_dto_buffs_release(dto); | 577 | iser_dto_buffs_release(dto); |
574 | 578 | ||
575 | iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); | 579 | iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); |
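The hunk above replaces open-coded `session->cmds[itt]` indexing with `iscsi_itt_to_ctask()` plus an explicit reference get/put, so the task cannot disappear between the lookup and `iser_task_rdma_finalize()`. A standalone sketch of that discipline, with invented names (`itt_to_task`, `put_task`) standing in for the libiscsi calls:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CMDS_MAX 16

struct task {
	int itt;
	int refcount;
};

static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task *cmds[CMDS_MAX];     /* stands in for session->cmds */

static struct task *itt_to_task(int itt)    /* caller holds session_lock */
{
	if (itt < 0 || itt >= CMDS_MAX)
		return NULL;
	return cmds[itt];
}

static void put_task(struct task *t)
{
	pthread_mutex_lock(&session_lock);
	if (--t->refcount == 0) {
		cmds[t->itt] = NULL;
		free(t);
	}
	pthread_mutex_unlock(&session_lock);
}

static void rcv_completion(int itt)
{
	struct task *t;

	pthread_mutex_lock(&session_lock);
	t = itt_to_task(itt);
	if (t)
		t->refcount++;              /* __iscsi_get_task analogue */
	pthread_mutex_unlock(&session_lock);

	if (!t) {
		fprintf(stderr, "itt %d can't be matched to a task\n", itt);
		return;
	}
	/* ... mark the task completed and finalize RDMA here ... */
	put_task(t);                        /* iscsi_put_task analogue */
}

int main(void)
{
	struct task *t = calloc(1, sizeof(*t));

	t->itt = 3;
	t->refcount = 1;
	cmds[3] = t;
	rcv_completion(3);      /* matched: ref taken, then dropped */
	rcv_completion(9);      /* unmatched: only logged */
	put_task(t);            /* drop the owning reference */
	return 0;
}
```

The point of taking the reference inside the locked region is that the lookup result is only trustworthy while the lock is held; the extra reference extends its validity past the unlock.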
@@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc) | |||
590 | struct iser_conn *ib_conn = dto->ib_conn; | 594 | struct iser_conn *ib_conn = dto->ib_conn; |
591 | struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; | 595 | struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; |
592 | struct iscsi_conn *conn = iser_conn->iscsi_conn; | 596 | struct iscsi_conn *conn = iser_conn->iscsi_conn; |
593 | struct iscsi_mgmt_task *mtask; | 597 | struct iscsi_task *task; |
594 | int resume_tx = 0; | 598 | int resume_tx = 0; |
595 | 599 | ||
596 | iser_dbg("Initiator, Data sent dto=0x%p\n", dto); | 600 | iser_dbg("Initiator, Data sent dto=0x%p\n", dto); |
@@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc) | |||
613 | 617 | ||
614 | if (tx_desc->type == ISCSI_TX_CONTROL) { | 618 | if (tx_desc->type == ISCSI_TX_CONTROL) { |
615 | /* this arithmetic is legal by libiscsi dd_data allocation */ | 619 | /* this arithmetic is legal by libiscsi dd_data allocation */ |
616 | mtask = (void *) ((long)(void *)tx_desc - | 620 | task = (void *) ((long)(void *)tx_desc - |
617 | sizeof(struct iscsi_mgmt_task)); | 621 | sizeof(struct iscsi_task)); |
618 | if (mtask->hdr->itt == RESERVED_ITT) { | 622 | if (task->hdr->itt == RESERVED_ITT) |
619 | struct iscsi_session *session = conn->session; | 623 | iscsi_put_task(task); |
620 | |||
621 | spin_lock(&conn->session->lock); | ||
622 | iscsi_free_mgmt_task(conn, mtask); | ||
623 | spin_unlock(&session->lock); | ||
624 | } | ||
625 | } | 624 | } |
626 | } | 625 | } |
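The pointer subtraction above is only "legal by libiscsi dd_data allocation" because the generic task and its transport-private area come from one allocation, with the iser descriptor placed first in `dd_data`. A compilable model of that layout assumption (all struct names here are stand-ins):

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct task {                   /* stands in for struct iscsi_task */
	unsigned int itt;
	void *dd_data;
};

struct desc {                   /* stands in for struct iser_desc */
	int type;
};

int main(void)
{
	/* one allocation: generic task immediately followed by dd_data */
	struct task *t = malloc(sizeof(*t) + sizeof(struct desc));
	struct desc *d;
	struct task *back;

	t->dd_data = t + 1;     /* private area starts right after t */
	d = t->dd_data;         /* descriptor sits first in that area */

	/* the recovery iser_snd_completion performs */
	back = (struct task *)((char *)d - sizeof(struct task));
	assert(back == t);
	printf("recovered task %p from desc %p\n", (void *)back, (void *)d);
	free(t);
	return 0;
}
```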
627 | 626 | ||
628 | void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask) | 627 | void iser_task_rdma_init(struct iscsi_iser_task *iser_task) |
629 | 628 | ||
630 | { | 629 | { |
631 | iser_ctask->status = ISER_TASK_STATUS_INIT; | 630 | iser_task->status = ISER_TASK_STATUS_INIT; |
632 | 631 | ||
633 | iser_ctask->dir[ISER_DIR_IN] = 0; | 632 | iser_task->dir[ISER_DIR_IN] = 0; |
634 | iser_ctask->dir[ISER_DIR_OUT] = 0; | 633 | iser_task->dir[ISER_DIR_OUT] = 0; |
635 | 634 | ||
636 | iser_ctask->data[ISER_DIR_IN].data_len = 0; | 635 | iser_task->data[ISER_DIR_IN].data_len = 0; |
637 | iser_ctask->data[ISER_DIR_OUT].data_len = 0; | 636 | iser_task->data[ISER_DIR_OUT].data_len = 0; |
638 | 637 | ||
639 | memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0, | 638 | memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, |
640 | sizeof(struct iser_regd_buf)); | 639 | sizeof(struct iser_regd_buf)); |
641 | memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0, | 640 | memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, |
642 | sizeof(struct iser_regd_buf)); | 641 | sizeof(struct iser_regd_buf)); |
643 | } | 642 | } |
644 | 643 | ||
645 | void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | 644 | void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) |
646 | { | 645 | { |
647 | int deferred; | 646 | int deferred; |
648 | int is_rdma_aligned = 1; | 647 | int is_rdma_aligned = 1; |
@@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | |||
651 | /* if we were reading, copy back to unaligned sglist, | 650 | /* if we were reading, copy back to unaligned sglist, |
652 | * anyway dma_unmap and free the copy | 651 | * anyway dma_unmap and free the copy |
653 | */ | 652 | */ |
654 | if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) { | 653 | if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { |
655 | is_rdma_aligned = 0; | 654 | is_rdma_aligned = 0; |
656 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN); | 655 | iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN); |
657 | } | 656 | } |
658 | if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) { | 657 | if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { |
659 | is_rdma_aligned = 0; | 658 | is_rdma_aligned = 0; |
660 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT); | 659 | iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT); |
661 | } | 660 | } |
662 | 661 | ||
663 | if (iser_ctask->dir[ISER_DIR_IN]) { | 662 | if (iser_task->dir[ISER_DIR_IN]) { |
664 | regd = &iser_ctask->rdma_regd[ISER_DIR_IN]; | 663 | regd = &iser_task->rdma_regd[ISER_DIR_IN]; |
665 | deferred = iser_regd_buff_release(regd); | 664 | deferred = iser_regd_buff_release(regd); |
666 | if (deferred) { | 665 | if (deferred) { |
667 | iser_err("%d references remain for BUF-IN rdma reg\n", | 666 | iser_err("%d references remain for BUF-IN rdma reg\n", |
@@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | |||
669 | } | 668 | } |
670 | } | 669 | } |
671 | 670 | ||
672 | if (iser_ctask->dir[ISER_DIR_OUT]) { | 671 | if (iser_task->dir[ISER_DIR_OUT]) { |
673 | regd = &iser_ctask->rdma_regd[ISER_DIR_OUT]; | 672 | regd = &iser_task->rdma_regd[ISER_DIR_OUT]; |
674 | deferred = iser_regd_buff_release(regd); | 673 | deferred = iser_regd_buff_release(regd); |
675 | if (deferred) { | 674 | if (deferred) { |
676 | iser_err("%d references remain for BUF-OUT rdma reg\n", | 675 | iser_err("%d references remain for BUF-OUT rdma reg\n", |
@@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | |||
680 | 679 | ||
681 | /* if the data was unaligned, it was already unmapped and then copied */ | 680 | /* if the data was unaligned, it was already unmapped and then copied */ |
682 | if (is_rdma_aligned) | 681 | if (is_rdma_aligned) |
683 | iser_dma_unmap_task_data(iser_ctask); | 682 | iser_dma_unmap_task_data(iser_task); |
684 | } | 683 | } |
685 | 684 | ||
686 | void iser_dto_buffs_release(struct iser_dto *dto) | 685 | void iser_dto_buffs_release(struct iser_dto *dto) |
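`iser_regd_buff_release()` acts here as a conditional teardown: judging from these call sites, a non-zero return means other references still pin the registered buffer and the real free is deferred to the last holder, which is why the caller merely logs. A small model of that assumed contract:

```c
#include <stdio.h>

struct regd_buf {
	int refcount;
};

/* returns the number of references that still defer the release
 * (assumed semantics, inferred from the call sites above) */
static int regd_buff_release(struct regd_buf *b)
{
	if (--b->refcount > 0)
		return b->refcount;
	/* last reference: undo the memory registration here */
	return 0;
}

int main(void)
{
	struct regd_buf buf = { .refcount = 2 };
	int deferred = regd_buff_release(&buf);

	if (deferred)
		fprintf(stderr,
			"%d references remain for BUF-IN rdma reg\n",
			deferred);
	regd_buff_release(&buf);    /* now actually torn down */
	return 0;
}
```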
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 81e49cb10ed3..b9453d068e9d 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device, | |||
99 | /** | 99 | /** |
100 | * iser_start_rdma_unaligned_sg | 100 | * iser_start_rdma_unaligned_sg |
101 | */ | 101 | */ |
102 | static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | 102 | static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, |
103 | enum iser_data_dir cmd_dir) | 103 | enum iser_data_dir cmd_dir) |
104 | { | 104 | { |
105 | int dma_nents; | 105 | int dma_nents; |
106 | struct ib_device *dev; | 106 | struct ib_device *dev; |
107 | char *mem = NULL; | 107 | char *mem = NULL; |
108 | struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; | 108 | struct iser_data_buf *data = &iser_task->data[cmd_dir]; |
109 | unsigned long cmd_data_len = data->data_len; | 109 | unsigned long cmd_data_len = data->data_len; |
110 | 110 | ||
111 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 111 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
138 | } | 138 | } |
139 | } | 139 | } |
140 | 140 | ||
141 | sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len); | 141 | sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len); |
142 | iser_ctask->data_copy[cmd_dir].buf = | 142 | iser_task->data_copy[cmd_dir].buf = |
143 | &iser_ctask->data_copy[cmd_dir].sg_single; | 143 | &iser_task->data_copy[cmd_dir].sg_single; |
144 | iser_ctask->data_copy[cmd_dir].size = 1; | 144 | iser_task->data_copy[cmd_dir].size = 1; |
145 | 145 | ||
146 | iser_ctask->data_copy[cmd_dir].copy_buf = mem; | 146 | iser_task->data_copy[cmd_dir].copy_buf = mem; |
147 | 147 | ||
148 | dev = iser_ctask->iser_conn->ib_conn->device->ib_device; | 148 | dev = iser_task->iser_conn->ib_conn->device->ib_device; |
149 | dma_nents = ib_dma_map_sg(dev, | 149 | dma_nents = ib_dma_map_sg(dev, |
150 | &iser_ctask->data_copy[cmd_dir].sg_single, | 150 | &iser_task->data_copy[cmd_dir].sg_single, |
151 | 1, | 151 | 1, |
152 | (cmd_dir == ISER_DIR_OUT) ? | 152 | (cmd_dir == ISER_DIR_OUT) ? |
153 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | 153 | DMA_TO_DEVICE : DMA_FROM_DEVICE); |
154 | BUG_ON(dma_nents == 0); | 154 | BUG_ON(dma_nents == 0); |
155 | 155 | ||
156 | iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; | 156 | iser_task->data_copy[cmd_dir].dma_nents = dma_nents; |
157 | return 0; | 157 | return 0; |
158 | } | 158 | } |
159 | 159 | ||
160 | /** | 160 | /** |
161 | * iser_finalize_rdma_unaligned_sg | 161 | * iser_finalize_rdma_unaligned_sg |
162 | */ | 162 | */ |
163 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | 163 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, |
164 | enum iser_data_dir cmd_dir) | 164 | enum iser_data_dir cmd_dir) |
165 | { | 165 | { |
166 | struct ib_device *dev; | 166 | struct ib_device *dev; |
167 | struct iser_data_buf *mem_copy; | 167 | struct iser_data_buf *mem_copy; |
168 | unsigned long cmd_data_len; | 168 | unsigned long cmd_data_len; |
169 | 169 | ||
170 | dev = iser_ctask->iser_conn->ib_conn->device->ib_device; | 170 | dev = iser_task->iser_conn->ib_conn->device->ib_device; |
171 | mem_copy = &iser_ctask->data_copy[cmd_dir]; | 171 | mem_copy = &iser_task->data_copy[cmd_dir]; |
172 | 172 | ||
173 | ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, | 173 | ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, |
174 | (cmd_dir == ISER_DIR_OUT) ? | 174 | (cmd_dir == ISER_DIR_OUT) ? |
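When the scatterlist fails the RDMA alignment test, the code above collapses it into one contiguous copy buffer and DMA-maps that single entry instead. A userspace sketch of the write-side gather step, with plain arrays standing in for `struct scatterlist` and `memcpy` for the eventual DMA:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seg {                    /* stands in for one scatterlist entry */
	const char *buf;
	size_t len;
};

/* gather an unaligned "scatterlist" into one contiguous bounce
 * buffer, as iser_start_rdma_unaligned_sg does for ISER_DIR_OUT */
static char *gather(const struct seg *sg, int nents, size_t total)
{
	char *mem = malloc(total);
	char *p = mem;
	int i;

	if (!mem)
		return NULL;
	for (i = 0; i < nents; i++) {
		memcpy(p, sg[i].buf, sg[i].len);
		p += sg[i].len;
	}
	return mem;                 /* map this single entry for RDMA */
}

int main(void)
{
	struct seg sg[] = { { "iser-", 5 }, { "bounce", 6 } };
	char *mem = gather(sg, 2, 11);

	printf("%.11s\n", mem);     /* "iser-bounce" */
	free(mem);
	return 0;
}
```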
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
184 | /* copy back read RDMA to unaligned sg */ | 184 | /* copy back read RDMA to unaligned sg */ |
185 | mem = mem_copy->copy_buf; | 185 | mem = mem_copy->copy_buf; |
186 | 186 | ||
187 | sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; | 187 | sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf; |
188 | sg_size = iser_ctask->data[ISER_DIR_IN].size; | 188 | sg_size = iser_task->data[ISER_DIR_IN].size; |
189 | 189 | ||
190 | p = mem; | 190 | p = mem; |
191 | for_each_sg(sgl, sg, sg_size, i) { | 191 | for_each_sg(sgl, sg, sg_size, i) { |
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | cmd_data_len = iser_ctask->data[cmd_dir].data_len; | 201 | cmd_data_len = iser_task->data[cmd_dir].data_len; |
202 | 202 | ||
203 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) | 203 | if (cmd_data_len > ISER_KMALLOC_THRESHOLD) |
204 | free_pages((unsigned long)mem_copy->copy_buf, | 204 | free_pages((unsigned long)mem_copy->copy_buf, |
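And the read-side mirror: once the RDMA read lands in the contiguous copy, the `for_each_sg` loop above scatters it back into the caller's original unaligned segments. A minimal model of that copy-back:

```c
#include <stdio.h>
#include <string.h>

struct seg {
	char *buf;
	size_t len;
};

/* scatter the contiguous bounce buffer back into the original
 * segments, mirroring the for_each_sg loop in
 * iser_finalize_rdma_unaligned_sg */
static void copy_back(const char *mem, struct seg *sg, int nents)
{
	const char *p = mem;
	int i;

	for (i = 0; i < nents; i++) {
		memcpy(sg[i].buf, p, sg[i].len);
		p += sg[i].len;
	}
}

int main(void)
{
	char a[6] = "", b[6] = "";
	struct seg sg[] = { { a, 5 }, { b, 6 } };

	copy_back("READ-DATA!!", sg, 2);
	printf("%.5s|%.6s\n", a, b);    /* READ-|DATA!! */
	return 0;
}
```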
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data, | |||
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
379 | int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, | 379 | int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, |
380 | struct iser_data_buf *data, | 380 | struct iser_data_buf *data, |
381 | enum iser_data_dir iser_dir, | 381 | enum iser_data_dir iser_dir, |
382 | enum dma_data_direction dma_dir) | 382 | enum dma_data_direction dma_dir) |
383 | { | 383 | { |
384 | struct ib_device *dev; | 384 | struct ib_device *dev; |
385 | 385 | ||
386 | iser_ctask->dir[iser_dir] = 1; | 386 | iser_task->dir[iser_dir] = 1; |
387 | dev = iser_ctask->iser_conn->ib_conn->device->ib_device; | 387 | dev = iser_task->iser_conn->ib_conn->device->ib_device; |
388 | 388 | ||
389 | data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); | 389 | data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); |
390 | if (data->dma_nents == 0) { | 390 | if (data->dma_nents == 0) { |
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, | |||
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | 396 | ||
397 | void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) | 397 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task) |
398 | { | 398 | { |
399 | struct ib_device *dev; | 399 | struct ib_device *dev; |
400 | struct iser_data_buf *data; | 400 | struct iser_data_buf *data; |
401 | 401 | ||
402 | dev = iser_ctask->iser_conn->ib_conn->device->ib_device; | 402 | dev = iser_task->iser_conn->ib_conn->device->ib_device; |
403 | 403 | ||
404 | if (iser_ctask->dir[ISER_DIR_IN]) { | 404 | if (iser_task->dir[ISER_DIR_IN]) { |
405 | data = &iser_ctask->data[ISER_DIR_IN]; | 405 | data = &iser_task->data[ISER_DIR_IN]; |
406 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); | 406 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); |
407 | } | 407 | } |
408 | 408 | ||
409 | if (iser_ctask->dir[ISER_DIR_OUT]) { | 409 | if (iser_task->dir[ISER_DIR_OUT]) { |
410 | data = &iser_ctask->data[ISER_DIR_OUT]; | 410 | data = &iser_task->data[ISER_DIR_OUT]; |
411 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); | 411 | ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); |
412 | } | 412 | } |
413 | } | 413 | } |
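The unconditional-looking unmap above is safe because `iser_dma_map_task_data()` records each direction it maps in `dir[]`, so only directions that were actually mapped get unmapped. In miniature (labels only, no real DMA):

```c
#include <stdio.h>

enum dir { DIR_IN, DIR_OUT };

struct task_model {
	int dir[2];             /* which directions were mapped */
};

static void map_data(struct task_model *t, enum dir d)
{
	t->dir[d] = 1;          /* remember the mapping ... */
	/* ... ib_dma_map_sg() would run here */
}

static void unmap_data(struct task_model *t)
{
	/* unmap only what was mapped, like iser_dma_unmap_task_data */
	if (t->dir[DIR_IN])
		printf("unmap DMA_FROM_DEVICE\n");
	if (t->dir[DIR_OUT])
		printf("unmap DMA_TO_DEVICE\n");
}

int main(void)
{
	struct task_model t = { { 0, 0 } };

	map_data(&t, DIR_IN);   /* a read-only command */
	unmap_data(&t);         /* prints only the IN unmap */
	return 0;
}
```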
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) | |||
418 | * | 418 | * |
419 | * returns 0 on success, errno code on failure | 419 | * returns 0 on success, errno code on failure |
420 | */ | 420 | */ |
421 | int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, | 421 | int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, |
422 | enum iser_data_dir cmd_dir) | 422 | enum iser_data_dir cmd_dir) |
423 | { | 423 | { |
424 | struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn; | 424 | struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; |
425 | struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; | 425 | struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; |
426 | struct iser_device *device = ib_conn->device; | 426 | struct iser_device *device = ib_conn->device; |
427 | struct ib_device *ibdev = device->ib_device; | 427 | struct ib_device *ibdev = device->ib_device; |
428 | struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; | 428 | struct iser_data_buf *mem = &iser_task->data[cmd_dir]; |
429 | struct iser_regd_buf *regd_buf; | 429 | struct iser_regd_buf *regd_buf; |
430 | int aligned_len; | 430 | int aligned_len; |
431 | int err; | 431 | int err; |
432 | int i; | 432 | int i; |
433 | struct scatterlist *sg; | 433 | struct scatterlist *sg; |
434 | 434 | ||
435 | regd_buf = &iser_ctask->rdma_regd[cmd_dir]; | 435 | regd_buf = &iser_task->rdma_regd[cmd_dir]; |
436 | 436 | ||
437 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | 437 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); |
438 | if (aligned_len != mem->dma_nents) { | 438 | if (aligned_len != mem->dma_nents) { |
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, | |||
442 | iser_data_buf_dump(mem, ibdev); | 442 | iser_data_buf_dump(mem, ibdev); |
443 | 443 | ||
444 | /* unmap the command data before accessing it */ | 444 | /* unmap the command data before accessing it */ |
445 | iser_dma_unmap_task_data(iser_ctask); | 445 | iser_dma_unmap_task_data(iser_task); |
446 | 446 | ||
447 | /* allocate copy buf, if we are writing, copy the */ | 447 | /* allocate copy buf, if we are writing, copy the */ |
448 | /* unaligned scatterlist, dma map the copy */ | 448 | /* unaligned scatterlist, dma map the copy */ |
449 | if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) | 449 | if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0) |
450 | return -ENOMEM; | 450 | return -ENOMEM; |
451 | mem = &iser_ctask->data_copy[cmd_dir]; | 451 | mem = &iser_task->data_copy[cmd_dir]; |
452 | } | 452 | } |
453 | 453 | ||
454 | /* if there is a single dma entry, FMR is not needed */ | 454 | /* if there is a single dma entry, FMR is not needed */
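`iser_data_buf_aligned_len()` (not shown in this hunk) is what gates the FMR path: roughly, interior segment boundaries must fall on page boundaries, otherwise the bounce-buffer fallback above is taken. A compilable approximation of that rule, under the assumption of a fixed 4096-byte page:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct seg {
	unsigned long offset;   /* where the segment starts in memory */
	unsigned long len;
};

/* approximate the FMR alignment rule: every segment except the first
 * must start page-aligned, every segment except the last must end
 * page-aligned (an approximation of iser_data_buf_aligned_len) */
static int aligned_for_fmr(const struct seg *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++) {
		unsigned long start = sg[i].offset;
		unsigned long end = sg[i].offset + sg[i].len;

		if (i > 0 && start % PAGE_SIZE)
			return 0;   /* interior segment starts mid-page */
		if (i < nents - 1 && end % PAGE_SIZE)
			return 0;   /* interior segment ends mid-page */
	}
	return 1;
}

int main(void)
{
	struct seg good[] = { { 512, 3584 }, { 4096, 4096 } };
	struct seg bad[]  = { { 0, 100 },    { 4096, 4096 } };

	printf("good: %d, bad: %d\n",
	       aligned_for_fmr(good, 2), aligned_for_fmr(bad, 2));
	return 0;
}
```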
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, | |||
472 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); | 472 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); |
473 | if (err) { | 473 | if (err) { |
474 | iser_data_buf_dump(mem, ibdev); | 474 | iser_data_buf_dump(mem, ibdev); |
475 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, | 475 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", |
476 | ntoh24(iser_ctask->desc.iscsi_header.dlength)); | 476 | mem->dma_nents, |
477 | ntoh24(iser_task->desc.iscsi_header.dlength)); | ||
477 | iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", | 478 | iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", |
478 | ib_conn->page_vec->data_size, ib_conn->page_vec->length, | 479 | ib_conn->page_vec->data_size, ib_conn->page_vec->length, |
479 | ib_conn->page_vec->offset); | 480 | ib_conn->page_vec->offset); |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 77cabee7cc08..3a917c1f796f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn) | |||
323 | iser_device_try_release(device); | 323 | iser_device_try_release(device); |
324 | if (ib_conn->iser_conn) | 324 | if (ib_conn->iser_conn) |
325 | ib_conn->iser_conn->ib_conn = NULL; | 325 | ib_conn->iser_conn->ib_conn = NULL; |
326 | kfree(ib_conn); | 326 | iscsi_destroy_endpoint(ib_conn->ep); |
327 | } | ||
328 | |||
329 | void iser_conn_get(struct iser_conn *ib_conn) | ||
330 | { | ||
331 | atomic_inc(&ib_conn->refcount); | ||
332 | } | ||
333 | |||
334 | void iser_conn_put(struct iser_conn *ib_conn) | ||
335 | { | ||
336 | if (atomic_dec_and_test(&ib_conn->refcount)) | ||
337 | iser_conn_release(ib_conn); | ||
327 | } | 338 | } |
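The new get/put pair above is plain reference counting: whoever drops the last reference via `iser_conn_put()` runs `iser_conn_release()`, so teardown no longer depends on which path finishes first. The same pattern in standalone C11, with `<stdatomic.h>` standing in for the kernel's `atomic_t`:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int refcount;
	/* ... queues, CM id, state ... */
};

static void conn_release(struct conn *c)
{
	printf("releasing conn %p\n", (void *)c);
	free(c);
}

static void conn_get(struct conn *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

static void conn_put(struct conn *c)
{
	/* fetch_sub returns the old value: old == 1 means we were last,
	 * the same test atomic_dec_and_test performs in the kernel */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		conn_release(c);
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));

	atomic_init(&c->refcount, 1);   /* owner's reference, as in init */
	conn_get(c);                    /* e.g. an in-flight callback */
	conn_put(c);                    /* callback done */
	conn_put(c);                    /* owner done: releases here */
	return 0;
}
```

Starting the count at 1 in `iser_conn_init()` (see the hunk below) gives the creator an implicit reference, matching the `iser_conn_put()` that replaced the direct release in `iser_conn_terminate()`.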
328 | 339 | ||
329 | /** | 340 | /** |
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn) | |||
347 | wait_event_interruptible(ib_conn->wait, | 358 | wait_event_interruptible(ib_conn->wait, |
348 | ib_conn->state == ISER_CONN_DOWN); | 359 | ib_conn->state == ISER_CONN_DOWN); |
349 | 360 | ||
350 | iser_conn_release(ib_conn); | 361 | iser_conn_put(ib_conn); |
351 | } | 362 | } |
352 | 363 | ||
353 | static void iser_connect_error(struct rdma_cm_id *cma_id) | 364 | static void iser_connect_error(struct rdma_cm_id *cma_id) |
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve | |||
481 | return ret; | 492 | return ret; |
482 | } | 493 | } |
483 | 494 | ||
484 | int iser_conn_init(struct iser_conn **ibconn) | 495 | void iser_conn_init(struct iser_conn *ib_conn) |
485 | { | 496 | { |
486 | struct iser_conn *ib_conn; | ||
487 | |||
488 | ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL); | ||
489 | if (!ib_conn) { | ||
490 | iser_err("can't alloc memory for struct iser_conn\n"); | ||
491 | return -ENOMEM; | ||
492 | } | ||
493 | ib_conn->state = ISER_CONN_INIT; | 497 | ib_conn->state = ISER_CONN_INIT; |
494 | init_waitqueue_head(&ib_conn->wait); | 498 | init_waitqueue_head(&ib_conn->wait); |
495 | atomic_set(&ib_conn->post_recv_buf_count, 0); | 499 | atomic_set(&ib_conn->post_recv_buf_count, 0); |
496 | atomic_set(&ib_conn->post_send_buf_count, 0); | 500 | atomic_set(&ib_conn->post_send_buf_count, 0); |
501 | atomic_set(&ib_conn->refcount, 1); | ||
497 | INIT_LIST_HEAD(&ib_conn->conn_list); | 502 | INIT_LIST_HEAD(&ib_conn->conn_list); |
498 | spin_lock_init(&ib_conn->lock); | 503 | spin_lock_init(&ib_conn->lock); |
499 | |||
500 | *ibconn = ib_conn; | ||
501 | return 0; | ||
502 | } | 504 | } |
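`iser_conn_init()` now initializes a caller-provided structure instead of allocating one; taken together with the release path calling `iscsi_destroy_endpoint(ib_conn->ep)` above, the connection is evidently embedded in an endpoint-sized allocation owned elsewhere. A sketch of that embed-and-init shape (the `endpoint` struct below is a stand-in, not the real `struct iscsi_endpoint`):

```c
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int state;
	int refcount;
};

struct endpoint {               /* stand-in for the iSCSI endpoint */
	long handle;
	struct conn conn;       /* conn embedded, not kmalloc'ed */
};

/* infallible: only initializes, mirroring the new iser_conn_init() */
static void conn_init(struct conn *c)
{
	c->state = 0;           /* ISER_CONN_INIT */
	c->refcount = 1;
}

int main(void)
{
	struct endpoint *ep = calloc(1, sizeof(*ep));

	conn_init(&ep->conn);   /* caller owns the allocation */
	printf("conn %p lives inside ep %p\n",
	       (void *)&ep->conn, (void *)ep);
	free(ep);               /* iscsi_destroy_endpoint analogue */
	return 0;
}
```

Making init infallible is what lets the old `-ENOMEM` path and the double-pointer out-parameter disappear from the signature.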
503 | 505 | ||
504 | /** | 506 | /** |