author    Erez Zilber <erezz@voltaire.com>    2006-09-27 09:43:06 -0400
committer Roland Dreier <rolandd@cisco.com>   2006-09-28 13:53:18 -0400
commit    74a2078061409e027ccb51a28cf6174c31ab8f99 (patch)
tree      281557a02ff3c5fe33e0d2dbf52fe77b6eaab474 /drivers/infiniband/ulp/iser
parent    87e8df7a273c7c1acb864c90b5253609c44375a6 (diff)
IB/iser: DMA unmap unaligned for RDMA data before touching it
iSER uses the DMA mapping API to map the pages holding the SCSI command
data into the HCA's DMA address space. When the command data is not
aligned for RDMA, it is copied to/from an allocated buffer, which is
then used to execute the command. The pages associated with the command
must be DMA-unmapped before they are touched.

Signed-off-by: Erez Zilber <erezz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
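For context: with the streaming DMA API, mapping a scatterlist hands
ownership of the pages to the device for the lifetime of the mapping,
so the CPU may only touch the data after dma_unmap_sg() returns
ownership. A minimal sketch of that ordering rule, assuming placeholder
names (dev, sg, nents, copy_to_bounce_buf) that are not taken from the
iSER code:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Placeholder for iSER's bounce-buffer copy (illustrative only). */
static void copy_to_bounce_buf(struct scatterlist *sg, int nents)
{
        /* ... memcpy the sg contents into an RDMA-aligned buffer ... */
}

static int map_then_touch_example(struct device *dev,
                                  struct scatterlist *sg, int nents)
{
        int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

        if (mapped == 0)
                return -EINVAL;

        /* While mapped, the pages are owned by the device; the CPU
         * must not read or write them through the kernel mapping. */

        dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);

        /* Ownership is back with the CPU; only now is it safe to
         * copy the data, e.g. into an RDMA-aligned bounce buffer. */
        copy_to_bounce_buf(sg, nents);
        return 0;
}

This is the ordering the patch restores: iser_reg_rdma_mem() now calls
iser_dma_unmap_task_data() before iser_start_rdma_unaligned_sg() copies
the unaligned scatterlist.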
Diffstat (limited to 'drivers/infiniband/ulp/iser')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h     |  7 +
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 49 +-
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c    | 42 +
3 files changed, 59 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 2826540d2f23..9c53916f28c2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -355,4 +355,11 @@ int iser_post_send(struct iser_desc *tx_desc);
 
 int iser_conn_state_comp(struct iser_conn *ib_conn,
                          enum iser_ib_conn_state comp);
+
+int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+                           struct iser_data_buf *data,
+                           enum iser_data_dir iser_dir,
+                           enum dma_data_direction dma_dir);
+
+void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 14ae61e07591..9b3d79c796c8 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -66,42 +66,6 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
         dto->regd_vector_len++;
 }
 
-static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
-                                  struct iser_data_buf *data,
-                                  enum iser_data_dir iser_dir,
-                                  enum dma_data_direction dma_dir)
-{
-        struct device *dma_device;
-
-        iser_ctask->dir[iser_dir] = 1;
-        dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-        data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
-        if (data->dma_nents == 0) {
-                iser_err("dma_map_sg failed!!!\n");
-                return -EINVAL;
-        }
-        return 0;
-}
-
-static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
-{
-        struct device *dma_device;
-        struct iser_data_buf *data;
-
-        dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-        if (iser_ctask->dir[ISER_DIR_IN]) {
-                data = &iser_ctask->data[ISER_DIR_IN];
-                dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
-        }
-
-        if (iser_ctask->dir[ISER_DIR_OUT]) {
-                data = &iser_ctask->data[ISER_DIR_OUT];
-                dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
-        }
-}
-
 /* Register user buffer memory and initialize passive rdma
  * dto descriptor. Total data size is stored in
  * iser_ctask->data[ISER_DIR_IN].data_len
@@ -699,14 +663,19 @@ void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
 void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 {
         int deferred;
+        int is_rdma_aligned = 1;
 
         /* if we were reading, copy back to unaligned sglist,
          * anyway dma_unmap and free the copy
          */
-        if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL)
+        if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+                is_rdma_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
-        if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL)
+        }
+        if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+                is_rdma_aligned = 0;
                 iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+        }
 
         if (iser_ctask->dir[ISER_DIR_IN]) {
                 deferred = iser_regd_buff_release
@@ -726,7 +695,9 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
                 }
         }
 
-        iser_dma_unmap_task_data(iser_ctask);
+        /* if the data was unaligned, it was already unmapped and then copied */
+        if (is_rdma_aligned)
+                iser_dma_unmap_task_data(iser_ctask);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index d0b03f426581..0606744c3f84 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -369,6 +369,44 @@ static void iser_page_vec_build(struct iser_data_buf *data,
         }
 }
 
+int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+                           struct iser_data_buf *data,
+                           enum iser_data_dir iser_dir,
+                           enum dma_data_direction dma_dir)
+{
+        struct device *dma_device;
+
+        iser_ctask->dir[iser_dir] = 1;
+        dma_device =
+                iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+        data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+        if (data->dma_nents == 0) {
+                iser_err("dma_map_sg failed!!!\n");
+                return -EINVAL;
+        }
+        return 0;
+}
+
+void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+{
+        struct device *dma_device;
+        struct iser_data_buf *data;
+
+        dma_device =
+                iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+        if (iser_ctask->dir[ISER_DIR_IN]) {
+                data = &iser_ctask->data[ISER_DIR_IN];
+                dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+        }
+
+        if (iser_ctask->dir[ISER_DIR_OUT]) {
+                data = &iser_ctask->data[ISER_DIR_OUT];
+                dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+        }
+}
+
 /**
  * iser_reg_rdma_mem - Registers memory intended for RDMA,
  * obtaining rkey and va
@@ -394,6 +432,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
                 iser_err("rdma alignment violation %d/%d aligned\n",
                          aligned_len, mem->size);
                 iser_data_buf_dump(mem);
+
+                /* unmap the command data before accessing it */
+                iser_dma_unmap_task_data(iser_ctask);
+
                 /* allocate copy buf, if we are writing, copy the */
                 /* unaligned scatterlist, dma map the copy */
                 if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
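
Taken together with the finalize-path change above, the lifecycle is:
map the scatterlist; on an alignment violation, unmap it and copy into
a freshly mapped bounce buffer; at task teardown, copy back and free
the bounce buffer if one was used, and call iser_dma_unmap_task_data()
only when the data was RDMA-aligned, since the unaligned case was
already unmapped in iser_reg_rdma_mem(). A condensed paraphrase of the
patched unaligned branch (not literal kernel source; the alignment test
and return value are inferred from the surrounding context):

        if (aligned_len != mem->size) {
                iser_data_buf_dump(mem);

                /* hand the pages back to the CPU before the copy touches them */
                iser_dma_unmap_task_data(iser_ctask);

                /* allocate the bounce buffer, copy if writing, DMA-map the copy */
                if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
                        return -EINVAL;
        }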