author	Mike Christie <michaelc@cs.wisc.edu>	2008-05-21 16:54:11 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-07-12 09:22:20 -0400
commit	2261ec3d686e35c1a6088ab7f00a1d02b528b994 (patch)
tree	7836e5ddf152861d074916ce1838941158138315 /drivers/infiniband/ulp/iser/iser_memory.c
parent	135a8ad4e09309d36dcb8b5c7f55db0b6a15b2d6 (diff)
[SCSI] iser: handle iscsi_cmd_task rename
This handles the libiscsi iscsi_cmd_task rename and renames the iser
cmd task (struct iscsi_iser_cmd_task / iser_ctask) to iser task
(struct iscsi_iser_task / iser_task) to match.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
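
For orientation, the sketch below reconstructs the shape of the renamed per-task structure. It is illustrative only: the field subset and the ISER_DIRS_NUM sizing are inferred from the accesses this diff makes and from iscsi_iser.h as it stood at the time; it is not quoted from this commit.

	/* Illustrative sketch, not the full definition from iscsi_iser.h. */
	struct iscsi_iser_task {			/* was: struct iscsi_iser_cmd_task */
		struct iser_desc	desc;		/* TX descriptor; desc.iscsi_header carries dlength */
		struct iscsi_iser_conn	*iser_conn;	/* reaches ib_conn->device->ib_device for DMA mapping */
		int			dir[ISER_DIRS_NUM];	  /* set once a direction has been DMA-mapped */
		struct iser_regd_buf	rdma_regd[ISER_DIRS_NUM]; /* per-direction RDMA registration */
		struct iser_data_buf	data[ISER_DIRS_NUM];	  /* the task's original scatterlist */
		struct iser_data_buf	data_copy[ISER_DIRS_NUM]; /* bounce copy used for unaligned SG */
	};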
Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_memory.c')
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	77
1 file changed, 39 insertions(+), 38 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index cac50c4dc159..48f2a601fc27 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -101,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
 /**
  * iser_start_rdma_unaligned_sg
  */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 					enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
 	struct ib_device *dev;
 	char *mem = NULL;
-	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *data = &iser_task->data[cmd_dir];
 	unsigned long cmd_data_len = data->data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -140,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
-	iser_ctask->data_copy[cmd_dir].buf =
-		&iser_ctask->data_copy[cmd_dir].sg_single;
-	iser_ctask->data_copy[cmd_dir].size = 1;
+	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+	iser_task->data_copy[cmd_dir].buf =
+		&iser_task->data_copy[cmd_dir].sg_single;
+	iser_task->data_copy[cmd_dir].size = 1;
 
-	iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+	iser_task->data_copy[cmd_dir].copy_buf = mem;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 	dma_nents = ib_dma_map_sg(dev,
-				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  &iser_task->data_copy[cmd_dir].sg_single,
 				  1,
 				  (cmd_dir == ISER_DIR_OUT) ?
 				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
-	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
 	return 0;
 }
 
 /**
  * iser_finalize_rdma_unaligned_sg
  */
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     enum iser_data_dir cmd_dir)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long cmd_data_len;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
-	mem_copy = &iser_ctask->data_copy[cmd_dir];
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_task->data_copy[cmd_dir];
 
 	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -186,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
-		sg_size = iser_ctask->data[ISER_DIR_IN].size;
+		sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+		sg_size = iser_task->data[ISER_DIR_IN].size;
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
@@ -200,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		}
 	}
 
-	cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+	cmd_data_len = iser_task->data[cmd_dir].data_len;
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
 		free_pages((unsigned long)mem_copy->copy_buf,
@@ -378,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 	}
 }
 
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
 			   enum dma_data_direction dma_dir)
 {
 	struct ib_device *dev;
 
-	iser_ctask->dir[iser_dir] = 1;
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	iser_task->dir[iser_dir] = 1;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -396,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
 	return 0;
 }
 
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
 {
 	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn->device->ib_device;
 
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		data = &iser_ctask->data[ISER_DIR_IN];
+	if (iser_task->dir[ISER_DIR_IN]) {
+		data = &iser_task->data[ISER_DIR_IN];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		data = &iser_ctask->data[ISER_DIR_OUT];
+	if (iser_task->dir[ISER_DIR_OUT]) {
+		data = &iser_task->data[ISER_DIR_OUT];
 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
@@ -420,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
  *
  * returns 0 on success, errno code on failure
  */
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 		      enum iser_data_dir cmd_dir)
 {
-	struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
-	struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+	struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
 	int err;
 	int i;
 	struct scatterlist *sg;
 
-	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+	regd_buf = &iser_task->rdma_regd[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
@@ -444,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
-		iser_dma_unmap_task_data(iser_ctask);
+		iser_dma_unmap_task_data(iser_task);
 
 		/* allocate copy buf, if we are writing, copy the */
 		/* unaligned scatterlist, dma map the copy */
-		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
 			return -ENOMEM;
-		mem = &iser_ctask->data_copy[cmd_dir];
+		mem = &iser_task->data_copy[cmd_dir];
 	}
 
 	/* if there a single dma entry, FMR is not needed */
@@ -474,8 +474,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 		if (err) {
 			iser_data_buf_dump(mem, ibdev);
-			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
-				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
+			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+				 mem->dma_nents,
+				 ntoh24(iser_task->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
 				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
 				 ib_conn->page_vec->offset);
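
As a usage note, the sequence below is a hypothetical caller sketch showing how the renamed helpers chain together for a WRITE. The real data-path caller is iser_initiator.c, whose code is not part of this hunk; the function name and simplified error handling here are assumptions for illustration, not this commit's code.

	/* Hypothetical illustration only; mirrors the map-then-register flow
	 * that the renamed iscsi_iser_task/iser_task helpers implement. */
	static int iser_prepare_write_sketch(struct iscsi_iser_task *iser_task)
	{
		int err;

		/* DMA-map the task's scatterlist for the outbound direction... */
		err = iser_dma_map_task_data(iser_task,
					     &iser_task->data[ISER_DIR_OUT],
					     ISER_DIR_OUT, DMA_TO_DEVICE);
		if (err)
			return err;

		/* ...then register it for RDMA. On an FMR-unaligned scatterlist,
		 * iser_reg_rdma_mem() internally unmaps, copies into a bounce
		 * buffer and remaps it (see iser_start_rdma_unaligned_sg above). */
		return iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	}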