author		Sagi Grimberg <sagig@mellanox.com>	2014-03-05 12:43:45 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-03-18 01:33:57 -0400
commit		5f588e3d0c9483ef2fd35f7fe5e104f236b704f8 (patch)
tree		e42327a90075dd51ae9d5932524186c93a014d52 /drivers/infiniband/ulp/iser/iser_memory.c
parent		9a8b08fad2efb3b6c8c5375dbaac5f4e1d19f206 (diff)
IB/iser: Generalize fall_to_bounce_buf routine
Unaligned SG-lists may also happen for protection information.
Generalize bounce buffer routine to handle any iser_data_buf which
may be data and/or protection.

This patch does not change any functionality.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_memory.c')
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	53
1 file changed, 29 insertions(+), 24 deletions(-)
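
In short, the bounce-buffer helpers stop resolving iser_task->data[cmd_dir] internally and instead take the unaligned buffer and its copy descriptor as parameters. A condensed view of the call-site change, taken from the hunks below (illustrative fragment, not standalone code):

	/* before: helper looked up the buffer from cmd_dir itself */
	err = fall_to_bounce_buf(iser_task, ibdev, cmd_dir, aligned_len);

	/* after: caller passes the unaligned buffer and the copy descriptor,
	 * so the same path can later serve protection buffers as well */
	err = fall_to_bounce_buf(iser_task, ibdev, mem,
				 &iser_task->data_copy[cmd_dir],
				 cmd_dir, aligned_len);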
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index a9335080ae69..2c3f4b144a1a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -45,13 +45,19 @@
  * iser_start_rdma_unaligned_sg
  */
 static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+					struct iser_data_buf *data,
+					struct iser_data_buf *data_copy,
 					enum iser_data_dir cmd_dir)
 {
-	int dma_nents;
-	struct ib_device *dev;
+	struct ib_device *dev = iser_task->iser_conn->ib_conn->device->ib_device;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	char *mem = NULL;
-	struct iser_data_buf *data = &iser_task->data[cmd_dir];
-	unsigned long  cmd_data_len = data->data_len;
+	unsigned long  cmd_data_len = 0;
+	int dma_nents, i;
+
+	for_each_sg(sgl, sg, data->size, i)
+		cmd_data_len += ib_sg_dma_len(dev, sg);
 
 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
 		mem = (void *)__get_free_pages(GFP_ATOMIC,
@@ -61,17 +67,16 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 	if (mem == NULL) {
 		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
-			 data->size,(int)cmd_data_len);
+			 data->size, (int)cmd_data_len);
 		return -ENOMEM;
 	}
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sgl = (struct scatterlist *)data->buf;
-		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
+		sgl = (struct scatterlist *)data->buf;
 		p = mem;
 		for_each_sg(sgl, sg, data->size, i) {
 			from = kmap_atomic(sg_page(sg));
@@ -83,22 +88,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 		}
 	}
 
-	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
-	iser_task->data_copy[cmd_dir].buf  =
-		&iser_task->data_copy[cmd_dir].sg_single;
-	iser_task->data_copy[cmd_dir].size = 1;
-
-	iser_task->data_copy[cmd_dir].copy_buf  = mem;
+	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
+	data_copy->buf = &data_copy->sg_single;
+	data_copy->size = 1;
+	data_copy->copy_buf = mem;
 
-	dev = iser_task->iser_conn->ib_conn->device->ib_device;
-	dma_nents = ib_dma_map_sg(dev,
-				  &iser_task->data_copy[cmd_dir].sg_single,
-				  1,
+	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
 				  (cmd_dir == ISER_DIR_OUT) ?
 				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
-	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
+	data_copy->dma_nents = dma_nents;
+	data_copy->data_len = cmd_data_len;
+
 	return 0;
 }
 
@@ -341,11 +343,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 			      struct ib_device *ibdev,
+			      struct iser_data_buf *mem,
+			      struct iser_data_buf *mem_copy,
 			      enum iser_data_dir cmd_dir,
 			      int aligned_len)
 {
 	struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 
 	iscsi_conn->fmr_unalign_cnt++;
 	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
@@ -355,12 +358,12 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 		iser_data_buf_dump(mem, ibdev);
 
 	/* unmap the command data before accessing it */
-	iser_dma_unmap_task_data(iser_task, &iser_task->data[cmd_dir]);
+	iser_dma_unmap_task_data(iser_task, mem);
 
 	/* allocate copy buf, if we are writing, copy the */
 	/* unaligned scatterlist, dma map the copy */
-	if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
+	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
 			return -ENOMEM;
 
 	return 0;
 }
@@ -388,7 +391,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, ibdev,
+		err = fall_to_bounce_buf(iser_task, ibdev, mem,
+					 &iser_task->data_copy[cmd_dir],
 					 cmd_dir, aligned_len);
 		if (err) {
 			iser_err("failed to allocate bounce buffer\n");
@@ -536,7 +540,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, ibdev, mem,
+		err = fall_to_bounce_buf(iser_task, ibdev, mem,
+					 &iser_task->data_copy[cmd_dir],
 					 cmd_dir, aligned_len);
 		if (err) {
 			iser_err("failed to allocate bounce buffer\n");
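
The point of passing the buffers explicitly is that the same bounce path can be reused for protection information, per the commit message. A hedged sketch of what such a call might look like; the prot/prot_copy task fields are hypothetical illustrations and are not introduced by this patch:

	/* hypothetical follow-up usage: bounce an unaligned protection SG-list;
	 * prot/prot_copy are assumed fields for illustration only */
	err = fall_to_bounce_buf(iser_task, ibdev,
				 &iser_task->prot[cmd_dir],
				 &iser_task->prot_copy[cmd_dir],
				 cmd_dir, aligned_len);
	if (err)
		iser_err("failed to allocate bounce buffer\n");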