author     Or Gerlitz <ogerlitz@voltaire.com>    2010-02-08 08:19:21 -0500
committer  Roland Dreier <rolandd@cisco.com>     2010-02-24 12:41:11 -0500
commit     78ad0a34dc138047529058c5f2265664cb70a052
tree       e83ddb3bb798e86589bef5d3e38cafacdd997b2b
parent     704315f082d473b34047817f0a6a01924f38501e
IB/iser: Use different CQ for send completions
Use a separate CQ for send completions. Send completions are polled by
the interrupt-driven receive completion handler, so interrupts never
need to be generated for the send CQ.
Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
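
The mechanism relies on a property of the verbs API: a CQ created with a
NULL completion handler and never armed via ib_req_notify_cq() raises no
completion interrupts, so its entries can only be reaped by explicit
ib_poll_cq() calls. Below is a minimal sketch of the split-CQ setup,
assuming the 2010-era ib_create_cq() signature (CQE count and completion
vector passed as plain arguments); create_split_cqs() and the CQ depth
of 128 are illustrative, not taken from the patch.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: create an interrupt-driven rx CQ and a "silent" tx CQ.
 * Error unwinding beyond the two CQs is elided for brevity.
 */
static int create_split_cqs(struct ib_device *ibdev, void *ctx,
			    ib_comp_handler rx_handler,
			    struct ib_cq **rx_cq, struct ib_cq **tx_cq)
{
	/* rx CQ: completion handler attached, armed below -> interrupts. */
	*rx_cq = ib_create_cq(ibdev, rx_handler, NULL, ctx, 128, 0);
	if (IS_ERR(*rx_cq))
		return PTR_ERR(*rx_cq);

	/*
	 * tx CQ: NULL completion handler, and never armed with
	 * ib_req_notify_cq(), so it never interrupts; its entries are
	 * reaped only by explicit ib_poll_cq() calls from the rx path.
	 */
	*tx_cq = ib_create_cq(ibdev, NULL, NULL, ctx, 128, 0);
	if (IS_ERR(*tx_cq)) {
		ib_destroy_cq(*rx_cq);
		return PTR_ERR(*tx_cq);
	}

	/* Arm only the rx CQ. */
	return ib_req_notify_cq(*rx_cq, IB_CQ_NEXT_COMP);
}

In the patch itself the receive CQ is sized ISER_MAX_RX_CQ_LEN and
serviced from a tasklet, which also drains the send CQ, as the diff
below shows.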
 drivers/infiniband/ulp/iser/iscsi_iser.h |   3
 drivers/infiniband/ulp/iser/iser_verbs.c | 110
 2 files changed, 76 insertions, 37 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 4491235340de..a314576be4bf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -247,7 +247,8 @@ struct iser_rx_desc {
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd                 *pd;
-	struct ib_cq                 *cq;
+	struct ib_cq                 *rx_cq;
+	struct ib_cq                 *tx_cq;
 	struct ib_mr                 *mr;
 	struct tasklet_struct        cq_tasklet;
 	struct list_head             ig_list; /* entry in ig devices list */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 202c00dc6a76..218aa10939a0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
 #include "iscsi_iser.h"
 
 #define ISCSI_ISER_MAX_CONN	8
-#define ISER_MAX_CQ_LEN		((ISER_QP_MAX_RECV_DTOS + \
-				ISER_QP_MAX_REQ_DTOS) * \
-				ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->cq = ib_create_cq(device->ib_device,
+	device->rx_cq = ib_create_cq(device->ib_device,
 				  iser_cq_callback,
 				  iser_cq_event_callback,
 				  (void *)device,
-				  ISER_MAX_CQ_LEN, 0);
-	if (IS_ERR(device->cq))
-		goto cq_err;
+				  ISER_MAX_RX_CQ_LEN, 0);
+	if (IS_ERR(device->rx_cq))
+		goto rx_cq_err;
+
+	device->tx_cq = ib_create_cq(device->ib_device,
+				  NULL, iser_cq_event_callback,
+				  (void *)device,
+				  ISER_MAX_TX_CQ_LEN, 0);
+
+	if (IS_ERR(device->tx_cq))
+		goto tx_cq_err;
 
-	if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
 		goto cq_arm_err;
 
 	tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
 dma_mr_err:
 	tasklet_kill(&device->cq_tasklet);
 cq_arm_err:
-	ib_destroy_cq(device->cq);
-cq_err:
+	ib_destroy_cq(device->tx_cq);
+tx_cq_err:
+	ib_destroy_cq(device->rx_cq);
+rx_cq_err:
 	ib_dealloc_pd(device->pd);
 pd_err:
 	iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
 	tasklet_kill(&device->cq_tasklet);
 
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->cq);
+	(void)ib_destroy_cq(device->tx_cq);
+	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
 
 	device->mr = NULL;
-	device->cq = NULL;
+	device->tx_cq = NULL;
+	device->rx_cq = NULL;
 	device->pd = NULL;
 }
 
@@ -179,8 +190,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context = (void *)ib_conn;
-	init_attr.send_cq = device->cq;
-	init_attr.recv_cq = device->cq;
+	init_attr.send_cq = device->tx_cq;
+	init_attr.recv_cq = device->rx_cq;
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
@@ -772,18 +783,8 @@ int iser_post_send(struct iser_desc *tx_desc)
 static void iser_handle_comp_error(struct iser_desc *desc,
 				struct iser_conn *ib_conn)
 {
-	struct iser_rx_desc *rx       = (struct iser_rx_desc *)desc;
-	struct iser_rx_desc *rx_first = ib_conn->rx_descs;
-	struct iser_rx_desc *rx_last  = rx_first + (ISER_QP_MAX_RECV_DTOS - 1);
-
-	if ((char *)desc == ib_conn->login_buf ||
-			(rx_first <= rx && rx <= rx_last))
-		ib_conn->post_recv_buf_count--;
-	else { /* type is TX control/command/dataout */
-		if (desc->type == ISCSI_TX_DATAOUT)
-			kmem_cache_free(ig.desc_cache, desc);
-		atomic_dec(&ib_conn->post_send_buf_count);
-	}
+	if (desc && desc->type == ISCSI_TX_DATAOUT)
+		kmem_cache_free(ig.desc_cache, desc);
 
 	if (ib_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
@@ -804,37 +805,74 @@ static void iser_handle_comp_error(struct iser_desc *desc,
 	}
 }
 
+static int iser_drain_tx_cq(struct iser_device *device)
+{
+	struct ib_cq *cq = device->tx_cq;
+	struct ib_wc wc;
+	struct iser_desc *tx_desc;
+	struct iser_conn *ib_conn;
+	int completed_tx = 0;
+
+	while (ib_poll_cq(cq, 1, &wc) == 1) {
+		tx_desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+		ib_conn = wc.qp->qp_context;
+		if (wc.status == IB_WC_SUCCESS) {
+			if (wc.opcode == IB_WC_SEND)
+				iser_snd_completion(tx_desc);
+			else
+				iser_err("expected opcode %d got %d\n",
+					IB_WC_SEND, wc.opcode);
+		} else {
+			iser_err("tx id %llx status %d vend_err %x\n",
+				wc.wr_id, wc.status, wc.vendor_err);
+			atomic_dec(&ib_conn->post_send_buf_count);
+			iser_handle_comp_error(tx_desc, ib_conn);
+		}
+		completed_tx++;
+	}
+	return completed_tx;
+}
+
+
 static void iser_cq_tasklet_fn(unsigned long data)
 {
 	struct iser_device  *device = (struct iser_device *)data;
-	struct ib_cq	    *cq = device->cq;
+	struct ib_cq	    *cq = device->rx_cq;
 	struct ib_wc	    wc;
-	struct iser_desc    *desc;
+	struct iser_rx_desc *desc;
 	unsigned long	    xfer_len;
 	struct iser_conn *ib_conn;
+	int completed_tx, completed_rx;
+	completed_tx = completed_rx = 0;
 
 	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+		desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
 		BUG_ON(desc == NULL);
 		ib_conn = wc.qp->qp_context;
-
 		if (wc.status == IB_WC_SUCCESS) {
 			if (wc.opcode == IB_WC_RECV) {
 				xfer_len = (unsigned long)wc.byte_len;
-				iser_rcv_completion((struct iser_rx_desc *)desc,
-							xfer_len, ib_conn);
-			} else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
-				iser_snd_completion(desc);
+				iser_rcv_completion(desc, xfer_len, ib_conn);
+			} else
+				iser_err("expected opcode %d got %d\n",
+					IB_WC_RECV, wc.opcode);
 		} else {
 			if (wc.status != IB_WC_WR_FLUSH_ERR)
-				iser_err("id %llx status %d vend_err %x\n",
+				iser_err("rx id %llx status %d vend_err %x\n",
 					wc.wr_id, wc.status, wc.vendor_err);
-			iser_handle_comp_error(desc, ib_conn);
+			ib_conn->post_recv_buf_count--;
+			iser_handle_comp_error(NULL, ib_conn);
 		}
+		completed_rx++;
+		if (!(completed_rx & 63))
+			completed_tx += iser_drain_tx_cq(device);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed"                       */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+	completed_tx += iser_drain_tx_cq(device);
+	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
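
Two details of the new polling scheme are easy to miss. First,
iser_handle_comp_error() no longer classifies the failed descriptor:
each poller now adjusts its own counter before calling it
(post_recv_buf_count on the rx path, post_send_buf_count in
iser_drain_tx_cq()). Second, send completions are reaped at two points:
on every 64th receive completion (the completed_rx & 63 test), so a long
rx burst cannot starve send-completion processing, and once more after
the rx CQ is drained and re-armed, to pick up sends that finished during
the loop. A stripped-down sketch of that loop structure follows;
rx_cq_poll() and drain_tx() are hypothetical reductions of
iser_cq_tasklet_fn() and iser_drain_tx_cq(), not code from the patch.

#include <rdma/ib_verbs.h>

/* Poll the silent tx CQ dry; returns how many sends completed. */
static int drain_tx(struct ib_cq *tx_cq)
{
	struct ib_wc wc;
	int n = 0;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		/* ... handle one send completion (wc.wr_id holds the
		 * tx descriptor pointer) ... */
		n++;
	}
	return n;
}

/* Interrupt-driven rx poller that reaps tx completions as a side effect. */
static void rx_cq_poll(struct ib_cq *rx_cq, struct ib_cq *tx_cq)
{
	struct ib_wc wc;
	int completed_rx = 0, completed_tx = 0;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		/* ... handle one receive completion ... */

		/* Every 64 rx completions, reap pending tx completions
		 * so the send side is serviced during long rx bursts. */
		if (!(++completed_rx & 63))
			completed_tx += drain_tx(tx_cq);
	}

	/* Re-arm only the rx CQ; the tx CQ never generates interrupts. */
	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);

	/* Final drain picks up sends that completed while we polled. */
	completed_tx += drain_tx(tx_cq);
}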