Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/iser/Kconfig          | 13
 drivers/infiniband/ulp/iser/iscsi_iser.c     |  2
 drivers/infiniband/ulp/iser/iscsi_iser.h     |  9
 drivers/infiniband/ulp/iser/iser_initiator.c | 60
 drivers/infiniband/ulp/iser/iser_memory.c    | 42
 drivers/infiniband/ulp/iser/iser_verbs.c     |  8
 6 files changed, 80 insertions(+), 54 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index 365a1b5f19e0..aecbb9083f0c 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,11 +1,12 @@
 config INFINIBAND_ISER
-	tristate "ISCSI RDMA Protocol"
+	tristate "iSCSI Extensions for RDMA (iSER)"
 	depends on INFINIBAND && SCSI && INET
 	select SCSI_ISCSI_ATTRS
 	---help---
-	  Support for the ISCSI RDMA Protocol over InfiniBand. This
-	  allows you to access storage devices that speak ISER/ISCSI
-	  over InfiniBand.
+	  Support for the iSCSI Extensions for RDMA (iSER) Protocol
+	  over InfiniBand. This allows you to access storage devices
+	  that speak iSCSI over iSER over InfiniBand.
 
-	  The ISER protocol is defined by IETF.
-	  See <http://www.ietf.org/>.
+	  The iSER protocol is defined by IETF.
+	  See <http://www.ietf.org/internet-drafts/draft-ietf-ips-iser-05.txt>
+	  and <http://www.infinibandta.org/members/spec/iser_annex_060418.pdf>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2a14fe2e3226..eb6f98d82289 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -317,6 +317,8 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
 	iscsi_conn_teardown(cls_conn);
+	if (iser_conn->ib_conn)
+		iser_conn->ib_conn->iser_conn = NULL;
 	kfree(iser_conn);
 }
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 2cf9ae0def1c..9c53916f28c2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -192,7 +192,7 @@ struct iser_regd_buf {
 
 struct iser_dto {
 	struct iscsi_iser_cmd_task *ctask;
-	struct iscsi_iser_conn     *conn;
+	struct iser_conn           *ib_conn;
 	int                        notify_enable;
 
 	/* vector of registered buffers */
@@ -355,4 +355,11 @@ int iser_post_send(struct iser_desc *tx_desc);
 
 int iser_conn_state_comp(struct iser_conn *ib_conn,
 			 enum iser_ib_conn_state comp);
+
+int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+			   struct iser_data_buf *data,
+			   enum iser_data_dir iser_dir,
+			   enum dma_data_direction dma_dir);
+
+void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ccf56f6f7236..9b3d79c796c8 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -66,42 +66,6 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
 	dto->regd_vector_len++;
 }
 
-static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
-				  struct iser_data_buf *data,
-				  enum iser_data_dir iser_dir,
-				  enum dma_data_direction dma_dir)
-{
-	struct device *dma_device;
-
-	iser_ctask->dir[iser_dir] = 1;
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
-	if (data->dma_nents == 0) {
-		iser_err("dma_map_sg failed!!!\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
-{
-	struct device *dma_device;
-	struct iser_data_buf *data;
-
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	if (iser_ctask->dir[ISER_DIR_IN]) {
-		data = &iser_ctask->data[ISER_DIR_IN];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
-	}
-
-	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		data = &iser_ctask->data[ISER_DIR_OUT];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
-	}
-}
-
 /* Register user buffer memory and initialize passive rdma
  * dto descriptor. Total data size is stored in
  * iser_ctask->data[ISER_DIR_IN].data_len
@@ -249,7 +213,7 @@ static int iser_post_receive_control(struct iscsi_conn *conn)
 	}
 
 	recv_dto = &rx_desc->dto;
-	recv_dto->conn = iser_conn;
+	recv_dto->ib_conn = iser_conn->ib_conn;
 	recv_dto->regd_vector_len = 0;
 
 	regd_hdr = &rx_desc->hdr_regd_buf;
@@ -296,7 +260,7 @@ static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
 	regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
 	regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
 
-	send_dto->conn = iser_conn;
+	send_dto->ib_conn = iser_conn->ib_conn;
 	send_dto->notify_enable = 1;
 	send_dto->regd_vector_len = 0;
 
@@ -588,7 +552,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 			 unsigned long dto_xfer_len)
 {
 	struct iser_dto *dto = &rx_desc->dto;
-	struct iscsi_iser_conn *conn = dto->conn;
+	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
 	struct iscsi_session *session = conn->iscsi_conn->session;
 	struct iscsi_cmd_task *ctask;
 	struct iscsi_iser_cmd_task *iser_ctask;
@@ -641,7 +605,8 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 void iser_snd_completion(struct iser_desc *tx_desc)
 {
 	struct iser_dto *dto = &tx_desc->dto;
-	struct iscsi_iser_conn *iser_conn = dto->conn;
+	struct iser_conn *ib_conn = dto->ib_conn;
+	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn *conn = iser_conn->iscsi_conn;
 	struct iscsi_mgmt_task *mtask;
 
@@ -652,7 +617,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	if (tx_desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, tx_desc);
 
-	atomic_dec(&iser_conn->ib_conn->post_send_buf_count);
+	atomic_dec(&ib_conn->post_send_buf_count);
 
 	write_lock(conn->recv_lock);
 	if (conn->suspend_tx) {
@@ -698,14 +663,19 @@ void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
 void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 {
 	int deferred;
+	int is_rdma_aligned = 1;
 
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
 	 */
-	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL)
+	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+		is_rdma_aligned = 0;
 		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
-	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL)
+	}
+	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+		is_rdma_aligned = 0;
 		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+	}
 
 	if (iser_ctask->dir[ISER_DIR_IN]) {
 		deferred = iser_regd_buff_release
@@ -725,7 +695,9 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 		}
 	}
 
-	iser_dma_unmap_task_data(iser_ctask);
+	/* if the data was unaligned, it was already unmapped and then copied */
+	if (is_rdma_aligned)
+		iser_dma_unmap_task_data(iser_ctask);
 }
 
 void iser_dto_buffs_release(struct iser_dto *dto)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index d0b03f426581..0606744c3f84 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -369,6 +369,44 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 	}
 }
 
+int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+			   struct iser_data_buf *data,
+			   enum iser_data_dir iser_dir,
+			   enum dma_data_direction dma_dir)
+{
+	struct device *dma_device;
+
+	iser_ctask->dir[iser_dir] = 1;
+	dma_device =
+		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+	if (data->dma_nents == 0) {
+		iser_err("dma_map_sg failed!!!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+{
+	struct device *dma_device;
+	struct iser_data_buf *data;
+
+	dma_device =
+		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+
+	if (iser_ctask->dir[ISER_DIR_IN]) {
+		data = &iser_ctask->data[ISER_DIR_IN];
+		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+	}
+
+	if (iser_ctask->dir[ISER_DIR_OUT]) {
+		data = &iser_ctask->data[ISER_DIR_OUT];
+		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+	}
+}
+
 /**
  * iser_reg_rdma_mem - Registers memory intended for RDMA,
  * obtaining rkey and va
@@ -394,6 +432,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 		iser_err("rdma alignment violation %d/%d aligned\n",
 			 aligned_len, mem->size);
 		iser_data_buf_dump(mem);
+
+		/* unmap the command data before accessing it */
+		iser_dma_unmap_task_data(iser_ctask);
+
 		/* allocate copy buf, if we are writing, copy the */
 		/* unaligned scatterlist, dma map the copy */
 		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ecdca7fc1e4c..18a000034996 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -571,6 +571,8 @@ void iser_conn_release(struct iser_conn *ib_conn)
 	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
 	if (device != NULL)
 		iser_device_try_release(device);
+	if (ib_conn->iser_conn)
+		ib_conn->iser_conn->ib_conn = NULL;
 	kfree(ib_conn);
 }
 
@@ -694,7 +696,7 @@ int iser_post_recv(struct iser_desc *rx_desc)
 	struct iser_dto *recv_dto = &rx_desc->dto;
 
 	/* Retrieve conn */
-	ib_conn = recv_dto->conn->ib_conn;
+	ib_conn = recv_dto->ib_conn;
 
 	iser_dto_to_iov(recv_dto, iov, 2);
 
@@ -727,7 +729,7 @@ int iser_post_send(struct iser_desc *tx_desc)
 	struct iser_conn *ib_conn;
 	struct iser_dto *dto = &tx_desc->dto;
 
-	ib_conn = dto->conn->ib_conn;
+	ib_conn = dto->ib_conn;
 
 	iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
 
@@ -774,7 +776,7 @@ static void iser_comp_error_worker(void *data)
 static void iser_handle_comp_error(struct iser_desc *desc)
 {
 	struct iser_dto *dto = &desc->dto;
-	struct iser_conn *ib_conn = dto->conn->ib_conn;
+	struct iser_conn *ib_conn = dto->ib_conn;
 
 	iser_dto_buffs_release(dto);
 