-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c	| 114
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.h	|  24
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c	| 190
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	|  77
4 files changed, 203 insertions, 202 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index baecca1ed42a..86d9c42f0d33 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -124,33 +124,33 @@ error:
 
 
 /**
- * iscsi_iser_task_init - Initialize ctask
- * @ctask: iscsi ctask
+ * iscsi_iser_task_init - Initialize task
+ * @task: iscsi task
  *
- * Initialize the ctask for the scsi command or mgmt command.
+ * Initialize the task for the scsi command or mgmt command.
  */
 static int
-iscsi_iser_task_init(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_init(struct iscsi_task *task)
 {
-	struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	/* mgmt ctask */
-	if (!ctask->sc) {
-		iser_ctask->desc.data = ctask->data;
+	/* mgmt task */
+	if (!task->sc) {
+		iser_task->desc.data = task->data;
 		return 0;
 	}
 
-	iser_ctask->command_sent = 0;
-	iser_ctask->iser_conn = iser_conn;
-	iser_ctask_rdma_init(iser_ctask);
+	iser_task->command_sent = 0;
+	iser_task->iser_conn = iser_conn;
+	iser_task_rdma_init(iser_task);
 	return 0;
 }
 
 /**
- * iscsi_iser_mtask_xmit - xmit management(immediate) ctask
+ * iscsi_iser_mtask_xmit - xmit management(immediate) task
  * @conn: iscsi connection
- * @ctask: ctask management ctask
+ * @task: task management task
  *
  * Notes:
  *	The function can return -EAGAIN in which case caller must
@@ -159,19 +159,19 @@ iscsi_iser_task_init(struct iscsi_cmd_task *ctask)
  *
  **/
 static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	int error = 0;
 
-	debug_scsi("ctask deq [cid %d itt 0x%x]\n", conn->id, ctask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
 
-	error = iser_send_control(conn, ctask);
+	error = iser_send_control(conn, task);
 
-	/* since iser xmits control with zero copy, ctasks can not be recycled
+	/* since iser xmits control with zero copy, tasks can not be recycled
 	 * right after sending them.
 	 * The recycling scheme is based on whether a response is expected
-	 * - if yes, the ctask is recycled at iscsi_complete_pdu
-	 * - if no,  the ctask is recycled at iser_snd_completion
+	 * - if yes, the task is recycled at iscsi_complete_pdu
+	 * - if no,  the task is recycled at iser_snd_completion
 	 */
 	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
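A note on the error convention in this hunk: -ENOBUFS from the send path means the send queue is temporarily full and the caller may simply retry later, while any other error escalates to connection failure. A minimal sketch of that rule in plain C, with a hypothetical send()/fail_conn() pair standing in for the driver entry points:

#include <errno.h>

/* -ENOBUFS is retryable backpressure; anything else fails the conn.
 * send() and fail_conn() are hypothetical stand-ins that mirror the
 * iser_send_control() / iscsi_conn_failure() pairing above. */
static int model_xmit(int (*send)(void), void (*fail_conn)(void))
{
	int error = send();

	if (error && error != -ENOBUFS)
		fail_conn();
	return error;
}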
@@ -181,27 +181,27 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 
 static int
 iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
-				struct iscsi_cmd_task *ctask)
+				struct iscsi_task *task)
 {
 	struct iscsi_data hdr;
 	int error = 0;
 
 	/* Send data-out PDUs while there's still unsolicited data to send */
-	while (ctask->unsol_count > 0) {
-		iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+	while (task->unsol_count > 0) {
+		iscsi_prep_unsolicit_data_pdu(task, &hdr);
 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
-			   hdr.itt, ctask->data_count);
+			   hdr.itt, task->data_count);
 
 		/* the buffer description has been passed with the command */
 		/* Send the command */
-		error = iser_send_data_out(conn, ctask, &hdr);
+		error = iser_send_data_out(conn, task, &hdr);
 		if (error) {
-			ctask->unsol_datasn--;
+			task->unsol_datasn--;
 			goto iscsi_iser_task_xmit_unsol_data_exit;
 		}
-		ctask->unsol_count -= ctask->data_count;
+		task->unsol_count -= task->data_count;
 		debug_scsi("Need to send %d more as data-out PDUs\n",
-			   ctask->unsol_count);
+			   task->unsol_count);
 	}
 
 iscsi_iser_task_xmit_unsol_data_exit:
@@ -209,37 +209,37 @@ iscsi_iser_task_xmit_unsol_data_exit:
 }
 
 static int
-iscsi_iser_task_xmit(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	int error = 0;
 
-	if (!ctask->sc)
-		return iscsi_iser_mtask_xmit(conn, ctask);
+	if (!task->sc)
+		return iscsi_iser_mtask_xmit(conn, task);
 
-	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
-		BUG_ON(scsi_bufflen(ctask->sc) == 0);
+	if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+		BUG_ON(scsi_bufflen(task->sc) == 0);
 
 		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-			   ctask->itt, scsi_bufflen(ctask->sc),
-			   ctask->imm_count, ctask->unsol_count);
+			   task->itt, scsi_bufflen(task->sc),
+			   task->imm_count, task->unsol_count);
 	}
 
-	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
-		   conn->id, ctask->itt);
+	debug_scsi("task deq [cid %d itt 0x%x]\n",
+		   conn->id, task->itt);
 
 	/* Send the cmd PDU */
-	if (!iser_ctask->command_sent) {
-		error = iser_send_command(conn, ctask);
+	if (!iser_task->command_sent) {
+		error = iser_send_command(conn, task);
 		if (error)
 			goto iscsi_iser_task_xmit_exit;
-		iser_ctask->command_sent = 1;
+		iser_task->command_sent = 1;
 	}
 
 	/* Send unsolicited data-out PDU(s) if necessary */
-	if (ctask->unsol_count)
-		error = iscsi_iser_task_xmit_unsol_data(conn, ctask);
+	if (task->unsol_count)
+		error = iscsi_iser_task_xmit_unsol_data(conn, task);
 
 iscsi_iser_task_xmit_exit:
 	if (error && error != -ENOBUFS)
@@ -248,17 +248,17 @@ iscsi_iser_task_xmit(struct iscsi_cmd_task *ctask)
 }
 
 static void
-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 
 	/* mgmt tasks do not need special cleanup */
-	if (!ctask->sc)
+	if (!task->sc)
 		return;
 
-	if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
-		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-		iser_ctask_rdma_finalize(iser_ctask);
+	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+		iser_task->status = ISER_TASK_STATUS_COMPLETED;
+		iser_task_rdma_finalize(iser_task);
 	}
 }
 
@@ -408,8 +408,8 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	int i;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_iser_cmd_task *iser_ctask;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
 
 	if (shost) {
 		printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
@@ -436,7 +436,7 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	 */
 	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
 					  ISCSI_DEF_XMIT_CMDS_MAX,
-					  sizeof(struct iscsi_iser_cmd_task),
+					  sizeof(struct iscsi_iser_task),
 					  initial_cmdsn);
 	if (!cls_session)
 		goto remove_host;
@@ -445,10 +445,10 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	shost->can_queue = session->scsi_cmds_max;
 	/* libiscsi setup itts, data and pool so just set desc fields */
 	for (i = 0; i < session->cmds_max; i++) {
-		ctask = session->cmds[i];
-		iser_ctask = ctask->dd_data;
-		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
-		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+		task = session->cmds[i];
+		iser_task = task->dd_data;
+		task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+		task->hdr_max = sizeof(iser_task->desc.iscsi_header);
 	}
 	return cls_session;
 
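The per-task wiring in the loop above is worth spelling out: libiscsi owns the task pool, and the transport aims each task's header pointer at the BHS embedded in its own TX descriptor, so the PDU header is built directly in the send buffer with no extra copy. A compilable model of that idea, with all type names as illustrative stand-ins for the kernel structs:

#include <stddef.h>

struct model_desc { char iscsi_header[48]; };		/* BHS-sized buffer */
struct model_iser_task { struct model_desc desc; };	/* transport data */
struct model_task {					/* libiscsi task */
	void *hdr;
	size_t hdr_max;
	struct model_iser_task priv;			/* plays dd_data */
};

/* mirror of the session-create loop: point hdr into the descriptor */
static void model_wire_headers(struct model_task *tasks, int n)
{
	for (int i = 0; i < n; i++) {
		tasks[i].hdr = tasks[i].priv.desc.iscsi_header;
		tasks[i].hdr_max = sizeof(tasks[i].priv.desc.iscsi_header);
	}
}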
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 96a600f127c8..05431f270fe8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -173,7 +173,7 @@ struct iser_data_buf {
 /* fwd declarations */
 struct iser_device;
 struct iscsi_iser_conn;
-struct iscsi_iser_cmd_task;
+struct iscsi_iser_task;
 
 struct iser_mem_reg {
 	u32  lkey;
@@ -197,7 +197,7 @@ struct iser_regd_buf {
 #define MAX_REGD_BUF_VECTOR_LEN	2
 
 struct iser_dto {
-	struct iscsi_iser_cmd_task *ctask;
+	struct iscsi_iser_task *task;
 	struct iser_conn *ib_conn;
 	int notify_enable;
 
@@ -265,7 +265,7 @@ struct iscsi_iser_conn {
 	rwlock_t		     lock;
 };
 
-struct iscsi_iser_cmd_task {
+struct iscsi_iser_task {
 	struct iser_desc             desc;
 	struct iscsi_iser_conn	     *iser_conn;
 	enum iser_task_status	     status;
@@ -299,13 +299,13 @@ extern int iser_debug_level;
 int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask);
+		      struct iscsi_task *task);
 
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask);
+		      struct iscsi_task *task);
 
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr);
 
 void iscsi_iser_recv(struct iscsi_conn *conn,
@@ -326,9 +326,9 @@ void iser_rcv_completion(struct iser_desc *desc,
 
 void iser_snd_completion(struct iser_desc *desc);
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_init(struct iscsi_iser_task *task);
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
 
 void iser_dto_buffs_release(struct iser_dto *dto);
 
@@ -338,10 +338,10 @@ void iser_reg_single(struct iser_device *device,
 		     struct iser_regd_buf *regd_buf,
 		     enum dma_data_direction direction);
 
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
 				     enum iser_data_dir cmd_dir);
 
-int  iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
 		       enum iser_data_dir cmd_dir);
 
 int  iser_connect(struct iser_conn *ib_conn,
@@ -361,10 +361,10 @@ int  iser_post_send(struct iser_desc *tx_desc);
 int iser_conn_state_comp(struct iser_conn *ib_conn,
 			 enum iser_ib_conn_state comp);
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir);
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 4ea78fbeee95..35af60a23c61 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -66,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  iser_ctask->data[ISER_DIR_IN].data_len
+ *  iser_task->data[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
 				 unsigned int edtl)
 
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_in,
 				     ISER_DIR_IN,
 				     DMA_FROM_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: "
 			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_IN].data_len, edtl,
-			 ctask->itt, iser_ctask->iser_conn);
+			 iser_task->data[ISER_DIR_IN].data_len, edtl,
+			 task->itt, iser_task->iser_conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
 	}
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
 
 	hdr->flags    |= ISER_RSV;
 	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
 	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
 
 	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
-		 ctask->itt, regd_buf->reg.rkey,
+		 task->itt, regd_buf->reg.rkey,
 		 (unsigned long long)regd_buf->reg.va);
 
 	return 0;
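What iser_prepare_read_cmd() advertises here: the initiator registers its READ buffer and places the resulting rkey and virtual address in the iSER header, so the target can RDMA-write the READ data straight into it. A self-contained sketch of that header fill, assuming a packed layout and an ISER_RSV-style flag value (both stand-ins, not copied from the kernel headers):

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>

#define MODEL_ISER_RSV 0x04	/* assumed flag bit, for illustration */

struct model_iser_hdr {
	uint8_t  flags;
	uint32_t read_stag;	/* remote key, big-endian */
	uint64_t read_va;	/* buffer address, big-endian */
} __attribute__((packed));

/* mirrors the hdr->flags / read_stag / read_va assignments above */
static void model_advertise_read(struct model_iser_hdr *hdr,
				 uint32_t rkey, uint64_t va)
{
	hdr->flags     |= MODEL_ISER_RSV;
	hdr->read_stag  = htonl(rkey);
	hdr->read_va    = htobe64(va);
}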
@@ -113,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
 
 /* Register user buffer memory and initialize passive rdma
  *  dto descriptor. Total data size is stored in
- *  ctask->data[ISER_DIR_OUT].data_len
+ *  task->data[ISER_DIR_OUT].data_len
  */
 static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int imm_sz,
 		       unsigned int unsol_sz,
 		       unsigned int edtl)
 {
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_regd_buf *regd_buf;
 	int err;
-	struct iser_dto *send_dto = &iser_ctask->desc.dto;
-	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
-	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+	struct iser_dto *send_dto = &iser_task->desc.dto;
+	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
 
-	err = iser_dma_map_task_data(iser_ctask,
+	err = iser_dma_map_task_data(iser_task,
 				     buf_out,
 				     ISER_DIR_OUT,
 				     DMA_TO_DEVICE);
 	if (err)
 		return err;
 
-	if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Total data length: %ld, less than EDTL: %d, "
 			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-			 iser_ctask->data[ISER_DIR_OUT].data_len,
-			 edtl, ctask->itt, ctask->conn);
+			 iser_task->data[ISER_DIR_OUT].data_len,
+			 edtl, task->itt, task->conn);
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
 	}
 
-	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
 
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
@@ -158,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
-			 ctask->itt, regd_buf->reg.rkey,
+			 task->itt, regd_buf->reg.rkey,
 			 (unsigned long long)regd_buf->reg.va, unsol_sz);
 	}
 
 	if (imm_sz > 0) {
 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
-			 ctask->itt, imm_sz);
+			 task->itt, imm_sz);
 		iser_dto_add_regd_buff(send_dto,
 				       regd_buf,
 				       0,
@@ -300,13 +300,13 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 }
 
 static int
-iser_check_xmit(struct iscsi_conn *conn, void *ctask)
+iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
 	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
 	    ISER_QP_MAX_REQ_DTOS) {
-		iser_dbg("%ld can't xmit ctask %p\n",jiffies,ctask);
+		iser_dbg("%ld can't xmit task %p\n",jiffies,task);
 		return -ENOBUFS;
 	}
 	return 0;
@@ -317,37 +317,37 @@ iser_check_xmit(struct iscsi_conn *conn, void *ctask)
  * iser_send_command - send command PDU
  */
 int iser_send_command(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_dto *send_dto = NULL;
 	unsigned long edtl;
 	int err = 0;
 	struct iser_data_buf *data_buf;
 
-	struct iscsi_cmd *hdr = ctask->hdr;
-	struct scsi_cmnd *sc  = ctask->sc;
+	struct iscsi_cmd *hdr = task->hdr;
+	struct scsi_cmnd *sc  = task->sc;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
 		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
 		return -EPERM;
 	}
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
 	/* build the tx desc regd header and add it to the tx desc dto */
-	iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
-	send_dto = &iser_ctask->desc.dto;
-	send_dto->ctask = iser_ctask;
-	iser_create_send_desc(iser_conn, &iser_ctask->desc);
+	iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+	send_dto = &iser_task->desc.dto;
+	send_dto->task = iser_task;
+	iser_create_send_desc(iser_conn, &iser_task->desc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
-		data_buf = &iser_ctask->data[ISER_DIR_IN];
+		data_buf = &iser_task->data[ISER_DIR_IN];
 	else
-		data_buf = &iser_ctask->data[ISER_DIR_OUT];
+		data_buf = &iser_task->data[ISER_DIR_OUT];
 
 	if (scsi_sg_count(sc)) { /* using a scatter list */
 		data_buf->buf  = scsi_sglist(sc);
@@ -357,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
 		data_buf->data_len = scsi_bufflen(sc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		err = iser_prepare_read_cmd(ctask, edtl);
+		err = iser_prepare_read_cmd(task, edtl);
 		if (err)
 			goto send_command_error;
 	}
 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
-		err = iser_prepare_write_cmd(ctask,
-					     ctask->imm_count,
-					     ctask->imm_count +
-					     ctask->unsol_count,
+		err = iser_prepare_write_cmd(task,
+					     task->imm_count,
+					     task->imm_count +
+					     task->unsol_count,
 					     edtl);
 		if (err)
 			goto send_command_error;
@@ -380,15 +380,15 @@ int iser_send_command(struct iscsi_conn *conn,
 		goto send_command_error;
 	}
 
-	iser_ctask->status = ISER_TASK_STATUS_STARTED;
+	iser_task->status = ISER_TASK_STATUS_STARTED;
 
-	err = iser_post_send(&iser_ctask->desc);
+	err = iser_post_send(&iser_task->desc);
 	if (!err)
 		return 0;
 
 send_command_error:
 	iser_dto_buffs_release(send_dto);
-	iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
 	return err;
 }
 
@@ -396,11 +396,11 @@ send_command_error:
  * iser_send_data_out - send data out PDU
  */
 int iser_send_data_out(struct iscsi_conn *conn,
-		       struct iscsi_cmd_task *ctask,
+		       struct iscsi_task *task,
 		       struct iscsi_data *hdr)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_desc *tx_desc = NULL;
 	struct iser_dto *send_dto = NULL;
 	unsigned long buf_offset;
@@ -413,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	itt = (__force uint32_t)hdr->itt;
@@ -434,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	send_dto = &tx_desc->dto;
-	send_dto->ctask = iser_ctask;
+	send_dto->task = iser_task;
 	iser_create_send_desc(iser_conn, tx_desc);
 
 	iser_reg_single(iser_conn->ib_conn->device,
@@ -442,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
 
 	/* all data was registered for RDMA, we can use the lkey */
 	iser_dto_add_regd_buff(send_dto,
-			       &iser_ctask->rdma_regd[ISER_DIR_OUT],
+			       &iser_task->rdma_regd[ISER_DIR_OUT],
 			       buf_offset,
 			       data_seg_len);
 
-	if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
 		iser_err("Offset:%ld & DSL:%ld in Data-Out "
 			 "inconsistent with total len:%ld, itt:%d\n",
 			 buf_offset, data_seg_len,
-			 iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+			 iser_task->data[ISER_DIR_OUT].data_len, itt);
 		err = -EINVAL;
 		goto send_data_out_error;
 	}
@@ -470,11 +470,11 @@ send_data_out_error:
 }
 
 int iser_send_control(struct iscsi_conn *conn,
-		      struct iscsi_cmd_task *ctask)
+		      struct iscsi_task *task)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
-	struct iser_desc *mdesc = &iser_ctask->desc;
+	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_desc *mdesc = &iser_task->desc;
 	struct iser_dto *send_dto = NULL;
 	unsigned long data_seg_len;
 	int err = 0;
@@ -486,27 +486,27 @@ int iser_send_control(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 
-	if (iser_check_xmit(conn, ctask))
+	if (iser_check_xmit(conn, task))
 		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
 	send_dto = &mdesc->dto;
-	send_dto->ctask = NULL;
+	send_dto->task = NULL;
 	iser_create_send_desc(iser_conn, mdesc);
 
 	device = iser_conn->ib_conn->device;
 
 	iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
 
-	data_seg_len = ntoh24(ctask->hdr->dlength);
+	data_seg_len = ntoh24(task->hdr->dlength);
 
 	if (data_seg_len > 0) {
 		regd_buf = &mdesc->data_regd_buf;
 		memset(regd_buf, 0, sizeof(struct iser_regd_buf));
 		regd_buf->device = device;
-		regd_buf->virt_addr = ctask->data;
-		regd_buf->data_size = ctask->data_count;
+		regd_buf->virt_addr = task->data;
+		regd_buf->data_size = task->data_count;
 		iser_reg_single(device, regd_buf,
 				DMA_TO_DEVICE);
 		iser_dto_add_regd_buff(send_dto, regd_buf,
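ntoh24() is used above because the iSCSI DataSegmentLength in the BHS is a 24-bit big-endian field; three bytes are assembled by hand rather than with ntohl(). A one-function sketch of the decoding:

#include <stdint.h>

/* decode a 24-bit big-endian length, as ntoh24() does for dlength */
static uint32_t model_ntoh24(const uint8_t p[3])
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}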
@@ -538,8 +538,8 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 {
 	struct iser_dto *dto = &rx_desc->dto;
 	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-	struct iscsi_cmd_task *ctask;
-	struct iscsi_iser_cmd_task *iser_ctask;
+	struct iscsi_task *task;
+	struct iscsi_iser_task *iser_task;
 	struct iscsi_hdr *hdr;
 	char *rx_data = NULL;
 	int rx_data_len = 0;
@@ -558,16 +558,16 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-		ctask = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
-		if (!ctask)
-			iser_err("itt can't be matched to ctask!!! "
+		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+		if (!task)
+			iser_err("itt can't be matched to task!!! "
 				 "conn %p opcode %d itt %d\n",
 				 conn->iscsi_conn, opcode, hdr->itt);
 		else {
-			iser_ctask = ctask->dd_data;
-			iser_dbg("itt %d ctask %p\n",hdr->itt, ctask);
-			iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-			iser_ctask_rdma_finalize(iser_ctask);
+			iser_task = task->dd_data;
+			iser_dbg("itt %d task %p\n",hdr->itt, task);
+			iser_task->status = ISER_TASK_STATUS_COMPLETED;
+			iser_task_rdma_finalize(iser_task);
 		}
 	}
 	iser_dto_buffs_release(dto);
@@ -578,7 +578,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 		kmem_cache_free(ig.desc_cache, rx_desc);
 
 	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
-	 * ctask eliminates the need to worry on ctasks which are completed in *
+	 * task eliminates the need to worry on tasks which are completed in   *
 	 * parallel to the execution of iser_conn_term. So the code that waits *
 	 * for the posted rx bufs refcount to become zero handles everything   */
 	atomic_dec(&conn->ib_conn->post_recv_buf_count);
@@ -590,7 +590,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	struct iser_conn *ib_conn = dto->ib_conn;
 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn *conn = iser_conn->iscsi_conn;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	int resume_tx = 0;
 
 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -613,31 +613,31 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 
 	if (tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
-		ctask = (void *) ((long)(void *)tx_desc -
-				  sizeof(struct iscsi_cmd_task));
-		if (ctask->hdr->itt == RESERVED_ITT)
-			iscsi_put_ctask(ctask);
+		task = (void *) ((long)(void *)tx_desc -
+				 sizeof(struct iscsi_task));
+		if (task->hdr->itt == RESERVED_ITT)
+			iscsi_put_task(task);
 	}
 }
 
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 {
-	iser_ctask->status = ISER_TASK_STATUS_INIT;
+	iser_task->status = ISER_TASK_STATUS_INIT;
 
-	iser_ctask->dir[ISER_DIR_IN] = 0;
-	iser_ctask->dir[ISER_DIR_OUT] = 0;
+	iser_task->dir[ISER_DIR_IN] = 0;
+	iser_task->dir[ISER_DIR_OUT] = 0;
 
-	iser_ctask->data[ISER_DIR_IN].data_len  = 0;
-	iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+	iser_task->data[ISER_DIR_IN].data_len  = 0;
+	iser_task->data[ISER_DIR_OUT].data_len = 0;
 
-	memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
 	       sizeof(struct iser_regd_buf));
-	memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
 	       sizeof(struct iser_regd_buf));
 }
 
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
 	int deferred;
 	int is_rdma_aligned = 1;
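The pointer arithmetic in iser_snd_completion() above deserves a closer look: it is valid only because libiscsi allocates each task and its transport-private dd_data area as one contiguous block, with the iSER descriptor first in the private area, so subtracting sizeof(struct iscsi_task) from the descriptor recovers the owning task. A userspace model of that round trip (struct names are stand-ins, not the kernel types):

#include <assert.h>
#include <stdlib.h>

struct model_iscsi_task {
	int itt;
	char dd_data[];		/* transport private area follows */
};

struct model_iser_task {
	int desc;		/* stands in for struct iser_desc */
};

int main(void)
{
	struct model_iscsi_task *task =
		malloc(sizeof(*task) + sizeof(struct model_iser_task));
	struct model_iser_task *iser_task = (void *)task->dd_data;

	/* the inverse step used in iser_snd_completion() */
	struct model_iscsi_task *back = (void *)
		((char *)iser_task - sizeof(struct model_iscsi_task));

	assert(back == task);
	free(task);
	return 0;
}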
@@ -646,17 +646,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
 	 */
-	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
 		is_rdma_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
 	}
-	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
 		is_rdma_aligned = 0;
-		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
 	}
 
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+	if (iser_task->dir[ISER_DIR_IN]) {
+		regd = &iser_task->rdma_regd[ISER_DIR_IN];
 		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
 			iser_err("%d references remain for BUF-IN rdma reg\n",
@@ -664,8 +664,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 		}
 	}
 
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+	if (iser_task->dir[ISER_DIR_OUT]) {
+		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
 		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
 			iser_err("%d references remain for BUF-OUT rdma reg\n",
@@ -675,7 +675,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 
 	/* if the data was unaligned, it was already unmapped and then copied */
 	if (is_rdma_aligned)
-		iser_dma_unmap_task_data(iser_ctask);
+		iser_dma_unmap_task_data(iser_task);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index cac50c4dc159..48f2a601fc27 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -101,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
 /**
  * iser_start_rdma_unaligned_sg
  */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 					enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
 	struct ib_device *dev;
 	char *mem = NULL;
-	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *data = &iser_task->data[cmd_dir];
 	unsigned long  cmd_data_len = data->data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -140,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
-	iser_ctask->data_copy[cmd_dir].buf  =
-		&iser_ctask->data_copy[cmd_dir].sg_single;
-	iser_ctask->data_copy[cmd_dir].size = 1;
+	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+	iser_task->data_copy[cmd_dir].buf  =
+		&iser_task->data_copy[cmd_dir].sg_single;
+	iser_task->data_copy[cmd_dir].size = 1;
 
-	iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
+	iser_task->data_copy[cmd_dir].copy_buf  = mem;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 	dma_nents = ib_dma_map_sg(dev,
-				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  &iser_task->data_copy[cmd_dir].sg_single,
 				  1,
 				  (cmd_dir == ISER_DIR_OUT) ?
 				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
-	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
 	return 0;
 }
 
 /**
  * iser_finalize_rdma_unaligned_sg
  */
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     enum iser_data_dir         cmd_dir)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long cmd_data_len;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
-	mem_copy = &iser_ctask->data_copy[cmd_dir];
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_task->data_copy[cmd_dir];
 
 	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -186,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
-		sg_size = iser_ctask->data[ISER_DIR_IN].size;
+		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+		sg_size = iser_task->data[ISER_DIR_IN].size;
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
@@ -200,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+	cmd_data_len = iser_task->data[cmd_dir].data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
 		free_pages((unsigned long)mem_copy->copy_buf,
@@ -378,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 	}
 }
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir)
 {
 	struct ib_device *dev;
 
-	iser_ctask->dir[iser_dir] = 1;
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	iser_task->dir[iser_dir] = 1;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -396,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
 	return 0;
 }
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		data = &iser_ctask->data[ISER_DIR_IN];
+	if (iser_task->dir[ISER_DIR_IN]) {
+		data = &iser_task->data[ISER_DIR_IN];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		data = &iser_ctask->data[ISER_DIR_OUT];
+	if (iser_task->dir[ISER_DIR_OUT]) {
+		data = &iser_task->data[ISER_DIR_OUT];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
@@ -420,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
  *
  * returns 0 on success, errno code on failure
  */
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 		      enum iser_data_dir cmd_dir)
 {
-	struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
-	struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
 	int err;
 	int i;
 	struct scatterlist *sg;
 
-	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+	regd_buf = &iser_task->rdma_regd[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
@@ -444,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
-		iser_dma_unmap_task_data(iser_ctask);
+		iser_dma_unmap_task_data(iser_task);
 
 		/* allocate copy buf, if we are writing, copy the */
 		/* unaligned scatterlist, dma map the copy        */
-		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
 			return -ENOMEM;
-		mem = &iser_ctask->data_copy[cmd_dir];
+		mem = &iser_task->data_copy[cmd_dir];
 	}
 
 	/* if there a single dma entry, FMR is not needed */
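The fallback above is a bounce buffer: when the mapped scatterlist is not RDMA-aligned, the data is copied into one flat allocation and that single region is registered instead (and, for READs, copied back on completion by iser_finalize_rdma_unaligned_sg()). A plain-C sketch of the WRITE-side flatten, using an iovec as a stand-in for a kernel scatterlist:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* flatten a fragmented buffer into one region fit for a single
 * registration; mirrors iser_start_rdma_unaligned_sg()'s copy loop */
static void *model_flatten(const struct iovec *iov, int cnt, size_t *len)
{
	size_t total = 0, off = 0;
	char *copy;
	int i;

	for (i = 0; i < cnt; i++)
		total += iov[i].iov_len;
	copy = malloc(total);
	if (!copy)
		return NULL;
	for (i = 0; i < cnt; i++) {
		memcpy(copy + off, iov[i].iov_base, iov[i].iov_len);
		off += iov[i].iov_len;
	}
	*len = total;
	return copy;	/* register this one contiguous region */
}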
@@ -474,8 +474,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 		if (err) {
 			iser_data_buf_dump(mem, ibdev);
-			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
-				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
+			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+				 mem->dma_nents,
+				 ntoh24(iser_task->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
 				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
 				 ib_conn->page_vec->offset);