author     Sagi Grimberg <sagig@mellanox.com>       2014-10-01 07:02:07 -0400
committer  Roland Dreier <roland@purestorage.com>   2014-10-09 03:06:06 -0400
commit     bf17554035ab2aaf770321208ce48e69aab71cc8 (patch)
tree       115f7ad8944cb02daba7ccf77e62287f080e3180 /drivers/infiniband
parent     aea8f4df6da46add468c44875348e1045bffeeb7 (diff)
IB/iser: Centralize iser completion contexts
Introduce iser_comp, which centralizes all iser completion-related items
and is referenced by iser_device and by each ib_conn.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
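For orientation, here is a minimal user-space sketch of the data-layout change
this patch makes: the per-CQ parallel arrays in iser_device collapse into a
single array of iser_comp contexts, and each connection points at one of them
instead of carrying an index. The field names follow the patch, but the
stand-in types and the main() driver are invented for illustration; this is
not the kernel code.

/* toy model -- stand-in types, not the kernel's struct ib_cq etc. */
#include <stdio.h>

#define ISER_MAX_CQ 4

struct iser_comp {
        void *rx_cq;            /* stand-in for struct ib_cq *rx_cq */
        void *tx_cq;            /* stand-in for struct ib_cq *tx_cq */
        int active_qps;         /* replaces device->cq_active_qps[i] */
};

struct iser_device {
        int comps_used;                         /* was cqs_used */
        struct iser_comp comps[ISER_MAX_CQ];    /* was rx_cq[], tx_cq[], cq_tasklet[] */
};

struct ib_conn {
        struct iser_comp *comp; /* replaces int cq_index */
};

int main(void)
{
        struct iser_device dev = { .comps_used = ISER_MAX_CQ };
        struct ib_conn conn = { .comp = &dev.comps[0] };

        conn.comp->active_qps++;        /* bookkeeping lives in the context now */
        printf("active QPs on comp 0: %d\n", dev.comps[0].active_qps);
        return 0;
}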
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h |  35
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 136
2 files changed, 84 insertions(+), 87 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 95c484d0f881..2bc34aa50705 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -213,7 +213,6 @@ struct iser_data_buf {
 
 /* fwd declarations */
 struct iser_device;
-struct iser_cq_desc;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
 
@@ -268,20 +267,34 @@ struct iser_conn;
 struct ib_conn;
 struct iscsi_iser_task;
 
+/**
+ * struct iser_comp - iSER completion context
+ *
+ * @device:     pointer to device handle
+ * @rx_cq:      RX completion queue
+ * @tx_cq:      TX completion queue
+ * @tasklet:    Tasklet handle
+ * @active_qps: Number of active QPs attached
+ *              to completion context
+ */
+struct iser_comp {
+        struct iser_device      *device;
+        struct ib_cq            *rx_cq;
+        struct ib_cq            *tx_cq;
+        struct tasklet_struct    tasklet;
+        int                      active_qps;
+};
+
 struct iser_device {
         struct ib_device        *ib_device;
         struct ib_pd            *pd;
         struct ib_device_attr   dev_attr;
-        struct ib_cq            *rx_cq[ISER_MAX_CQ];
-        struct ib_cq            *tx_cq[ISER_MAX_CQ];
         struct ib_mr            *mr;
-        struct tasklet_struct   cq_tasklet[ISER_MAX_CQ];
         struct ib_event_handler event_handler;
         struct list_head        ig_list; /* entry in ig devices list */
         int                     refcount;
-        int                     cq_active_qps[ISER_MAX_CQ];
-        int                     cqs_used;
-        struct iser_cq_desc     *cq_desc;
+        int                     comps_used;
+        struct iser_comp        comps[ISER_MAX_CQ];
         int                     (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
                                                            unsigned cmds_max);
         void                    (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
@@ -327,6 +340,7 @@ struct fast_reg_descriptor {
  * @post_send_buf_count: post send counter
  * @rx_wr:               receive work request for batch posts
  * @device:              reference to iser device
+ * @comp:                iser completion context
  * @pi_support:          Indicate device T10-PI support
  * @lock:                protects fmr/fastreg pool
  * @union.fmr:
@@ -345,7 +359,7 @@ struct ib_conn {
         atomic_t                post_send_buf_count;
         struct ib_recv_wr       rx_wr[ISER_MIN_POSTED_RX];
         struct iser_device      *device;
-        int                     cq_index;
+        struct iser_comp        *comp;
         bool                    pi_support;
         spinlock_t              lock;
         union {
@@ -404,11 +418,6 @@ struct iser_page_vec {
         int                     data_size;
 };
 
-struct iser_cq_desc {
-        struct iser_device      *device;
-        int                     cq_index;
-};
-
 struct iser_global {
         struct mutex      device_list_mutex;/*                   */
         struct list_head  device_list;      /* all iSER devices */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 57b20c691367..94d1b46b467a 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -44,7 +44,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static int iser_drain_tx_cq(struct iser_device *device, int cq_index);
+static int iser_drain_tx_cq(struct iser_comp *comp);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -72,7 +72,6 @@ static void iser_event_handler(struct ib_event_handler *handler,
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
-        struct iser_cq_desc *cq_desc;
         struct ib_device_attr *dev_attr = &device->dev_attr;
         int ret, i;
 
@@ -102,51 +101,44 @@ static int iser_create_device_ib_res(struct iser_device *device)
                 return -1;
         }
 
-        device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+        device->comps_used = min(ISER_MAX_CQ,
+                                 device->ib_device->num_comp_vectors);
         iser_info("using %d CQs, device %s supports %d vectors\n",
-                  device->cqs_used, device->ib_device->name,
+                  device->comps_used, device->ib_device->name,
                   device->ib_device->num_comp_vectors);
 
-        device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
-                                  GFP_KERNEL);
-        if (device->cq_desc == NULL)
-                goto cq_desc_err;
-        cq_desc = device->cq_desc;
-
         device->pd = ib_alloc_pd(device->ib_device);
         if (IS_ERR(device->pd))
                 goto pd_err;
 
-        for (i = 0; i < device->cqs_used; i++) {
-                cq_desc[i].device   = device;
-                cq_desc[i].cq_index = i;
-
-                device->rx_cq[i] = ib_create_cq(device->ib_device,
-                                          iser_cq_callback,
-                                          iser_cq_event_callback,
-                                          (void *)&cq_desc[i],
-                                          ISER_MAX_RX_CQ_LEN, i);
-                if (IS_ERR(device->rx_cq[i])) {
-                        device->rx_cq[i] = NULL;
+        for (i = 0; i < device->comps_used; i++) {
+                struct iser_comp *comp = &device->comps[i];
+
+                comp->device = device;
+                comp->rx_cq = ib_create_cq(device->ib_device,
+                                           iser_cq_callback,
+                                           iser_cq_event_callback,
+                                           (void *)comp,
+                                           ISER_MAX_RX_CQ_LEN, i);
+                if (IS_ERR(comp->rx_cq)) {
+                        comp->rx_cq = NULL;
                         goto cq_err;
                 }
 
-                device->tx_cq[i] = ib_create_cq(device->ib_device,
-                                          NULL, iser_cq_event_callback,
-                                          (void *)&cq_desc[i],
-                                          ISER_MAX_TX_CQ_LEN, i);
-
-                if (IS_ERR(device->tx_cq[i])) {
-                        device->tx_cq[i] = NULL;
+                comp->tx_cq = ib_create_cq(device->ib_device, NULL,
+                                           iser_cq_event_callback,
+                                           (void *)comp,
+                                           ISER_MAX_TX_CQ_LEN, i);
+                if (IS_ERR(comp->tx_cq)) {
+                        comp->tx_cq = NULL;
                         goto cq_err;
                 }
 
-                if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+                if (ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP))
                         goto cq_err;
 
-                tasklet_init(&device->cq_tasklet[i],
-                         iser_cq_tasklet_fn,
-                        (unsigned long)&cq_desc[i]);
+                tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
+                             (unsigned long)comp);
         }
 
         device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
151 143
152 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | 144 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
@@ -165,19 +157,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
         ib_dereg_mr(device->mr);
 dma_mr_err:
-        for (i = 0; i < device->cqs_used; i++)
-                tasklet_kill(&device->cq_tasklet[i]);
+        for (i = 0; i < device->comps_used; i++)
+                tasklet_kill(&device->comps[i].tasklet);
 cq_err:
-        for (i = 0; i < device->cqs_used; i++) {
-                if (device->tx_cq[i])
-                        ib_destroy_cq(device->tx_cq[i]);
-                if (device->rx_cq[i])
-                        ib_destroy_cq(device->rx_cq[i]);
+        for (i = 0; i < device->comps_used; i++) {
+                struct iser_comp *comp = &device->comps[i];
+
+                if (comp->tx_cq)
+                        ib_destroy_cq(comp->tx_cq);
+                if (comp->rx_cq)
+                        ib_destroy_cq(comp->rx_cq);
         }
         ib_dealloc_pd(device->pd);
 pd_err:
-        kfree(device->cq_desc);
-cq_desc_err:
         iser_err("failed to allocate an IB resource\n");
         return -1;
 }
@@ -191,20 +183,20 @@ static void iser_free_device_ib_res(struct iser_device *device)
         int i;
         BUG_ON(device->mr == NULL);
 
-        for (i = 0; i < device->cqs_used; i++) {
-                tasklet_kill(&device->cq_tasklet[i]);
-                (void)ib_destroy_cq(device->tx_cq[i]);
-                (void)ib_destroy_cq(device->rx_cq[i]);
-                device->tx_cq[i] = NULL;
-                device->rx_cq[i] = NULL;
+        for (i = 0; i < device->comps_used; i++) {
+                struct iser_comp *comp = &device->comps[i];
+
+                tasklet_kill(&comp->tasklet);
+                ib_destroy_cq(comp->tx_cq);
+                ib_destroy_cq(comp->rx_cq);
+                comp->tx_cq = NULL;
+                comp->rx_cq = NULL;
         }
 
         (void)ib_unregister_event_handler(&device->event_handler);
         (void)ib_dereg_mr(device->mr);
         (void)ib_dealloc_pd(device->pd);
 
-        kfree(device->cq_desc);
-
         device->mr = NULL;
         device->pd = NULL;
 }
@@ -456,19 +448,20 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 
         mutex_lock(&ig.connlist_mutex);
         /* select the CQ with the minimal number of usages */
-        for (index = 0; index < device->cqs_used; index++)
-                if (device->cq_active_qps[index] <
-                    device->cq_active_qps[min_index])
+        for (index = 0; index < device->comps_used; index++) {
+                if (device->comps[index].active_qps <
+                    device->comps[min_index].active_qps)
                         min_index = index;
-        device->cq_active_qps[min_index]++;
-        ib_conn->cq_index = min_index;
+        }
+        ib_conn->comp = &device->comps[min_index];
+        ib_conn->comp->active_qps++;
         mutex_unlock(&ig.connlist_mutex);
         iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
         init_attr.event_handler = iser_qp_event_callback;
         init_attr.qp_context    = (void *)ib_conn;
-        init_attr.send_cq       = device->tx_cq[min_index];
-        init_attr.recv_cq       = device->rx_cq[min_index];
+        init_attr.send_cq       = ib_conn->comp->tx_cq;
+        init_attr.recv_cq       = ib_conn->comp->rx_cq;
         init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
         init_attr.cap.max_send_sge = 2;
         init_attr.cap.max_recv_sge = 1;
@@ -604,7 +597,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
         iser_free_rx_descriptors(iser_conn);
 
         if (ib_conn->qp != NULL) {
-                ib_conn->device->cq_active_qps[ib_conn->cq_index]--;
+                ib_conn->comp->active_qps--;
                 rdma_destroy_qp(ib_conn->cma_id);
                 ib_conn->qp = NULL;
         }
@@ -655,14 +648,13 @@ void iser_conn_release(struct iser_conn *iser_conn)
  */
 static void iser_poll_for_flush_errors(struct ib_conn *ib_conn)
 {
-        struct iser_device *device = ib_conn->device;
         int count = 0;
 
         while (ib_conn->post_recv_buf_count > 0 ||
                atomic_read(&ib_conn->post_send_buf_count) > 0) {
                 msleep(100);
                 if (atomic_read(&ib_conn->post_send_buf_count) > 0)
-                        iser_drain_tx_cq(device, ib_conn->cq_index);
+                        iser_drain_tx_cq(ib_conn->comp);
 
                 count++;
                 /* Don't flood with prints */
@@ -1189,9 +1181,9 @@ iser_handle_comp_error(struct iser_tx_desc *desc,
         kmem_cache_free(ig.desc_cache, desc);
 }
 
-static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
+static int iser_drain_tx_cq(struct iser_comp *comp)
 {
-        struct ib_cq *cq = device->tx_cq[cq_index];
+        struct ib_cq *cq = comp->tx_cq;
         struct ib_wc wc;
         struct iser_tx_desc *tx_desc;
         struct ib_conn *ib_conn;
@@ -1222,20 +1214,18 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-        struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
-        struct iser_device  *device = cq_desc->device;
-        int cq_index = cq_desc->cq_index;
-        struct ib_cq *cq = device->rx_cq[cq_index];
-        struct ib_wc wc;
-        struct iser_rx_desc *desc;
-        unsigned long xfer_len;
+        struct iser_comp *comp = (struct iser_comp *)data;
+        struct ib_cq *cq = comp->rx_cq;
+        struct ib_wc wc;
+        struct iser_rx_desc *desc;
+        unsigned long xfer_len;
         struct ib_conn *ib_conn;
         int completed_tx, completed_rx = 0;
 
         /* First do tx drain, so in a case where we have rx flushes and a successful
          * tx completion we will still go through completion error handling.
          */
-        completed_tx = iser_drain_tx_cq(device, cq_index);
+        completed_tx = iser_drain_tx_cq(comp);
 
         while (ib_poll_cq(cq, 1, &wc) == 1) {
                 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
@@ -1257,7 +1247,7 @@ static void iser_cq_tasklet_fn(unsigned long data)
                 }
                 completed_rx++;
                 if (!(completed_rx & 63))
-                        completed_tx += iser_drain_tx_cq(device, cq_index);
+                        completed_tx += iser_drain_tx_cq(comp);
         }
         /* #warning "it is assumed here that arming CQ only once its empty" *
          * " would not cause interrupts to be missed" */
@@ -1268,11 +1258,9 @@ static void iser_cq_tasklet_fn(unsigned long data)
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-        struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
-        struct iser_device *device = cq_desc->device;
-        int cq_index = cq_desc->cq_index;
+        struct iser_comp *comp = cq_context;
 
-        tasklet_schedule(&device->cq_tasklet[cq_index]);
+        tasklet_schedule(&comp->tasklet);
 }
 
 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
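
As a closing note, the connection-setup path keeps its least-loaded spreading
policy; only the bookkeeping moves into iser_comp. Below is a small
stand-alone sketch of that policy under the same toy types as the earlier
example (the kernel version runs under ig.connlist_mutex and operates on real
CQs; pick_comp() is a hypothetical helper, not a function from the patch):

/* Sketch of the least-loaded selection that iser_create_ib_conn_res()
 * performs over device->comps[]: pick the context with the fewest
 * active QPs, then bind the connection to it. Toy types, not kernel code. */
#include <stdio.h>

#define ISER_MAX_CQ 4

struct iser_comp { int active_qps; };

struct iser_device {
        int comps_used;
        struct iser_comp comps[ISER_MAX_CQ];
};

static struct iser_comp *pick_comp(struct iser_device *device)
{
        int index, min_index = 0;

        /* same shape as the patched loop: strictly-less keeps the
         * first minimum, so ties go to the lowest index */
        for (index = 0; index < device->comps_used; index++)
                if (device->comps[index].active_qps <
                    device->comps[min_index].active_qps)
                        min_index = index;

        device->comps[min_index].active_qps++;
        return &device->comps[min_index];
}

int main(void)
{
        struct iser_device dev = {
                .comps_used = 3,
                .comps = { { .active_qps = 2 },
                           { .active_qps = 0 },
                           { .active_qps = 1 } },
        };
        struct iser_comp *comp = pick_comp(&dev);

        /* comps[1] is picked and its count goes 0 -> 1 */
        printf("picked comp with %d active QPs\n", comp->active_qps);
        return 0;
}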