 drivers/infiniband/ulp/isert/ib_isert.c | 192
 drivers/infiniband/ulp/isert/ib_isert.h |  30
 2 files changed, 114 insertions(+), 108 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c4b9c6d3662b..d4a208381af8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -107,22 +107,25 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
-	int ret, index, min_index = 0;
+	struct isert_comp *comp;
+	int ret, i, min = 0;
 
 	mutex_lock(&device_list_mutex);
-	for (index = 0; index < device->cqs_used; index++)
-		if (device->cq_active_qps[index] <
-		    device->cq_active_qps[min_index])
-			min_index = index;
-	device->cq_active_qps[min_index]++;
-	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+	for (i = 0; i < device->comps_used; i++)
+		if (device->comps[i].active_qps <
+		    device->comps[min].active_qps)
+			min = i;
+	comp = &device->comps[min];
+	comp->active_qps++;
+	pr_info("conn %p, using comp %p min_index: %d\n",
+		isert_conn, comp, min);
 	mutex_unlock(&device_list_mutex);
 
 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
 	attr.event_handler = isert_qp_event_callback;
 	attr.qp_context = isert_conn;
-	attr.send_cq = device->dev_tx_cq[min_index];
-	attr.recv_cq = device->dev_rx_cq[min_index];
+	attr.send_cq = comp->tx_cq;
+	attr.recv_cq = comp->rx_cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
 	/*
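Note on the hunk above: the QP placement policy is unchanged, only restructured — each new connection attaches to the completion context currently serving the fewest QPs. A minimal standalone sketch of that selection step, using a simplified stand-in struct rather than the driver's real isert_comp:

	/* Illustrative only: the least-loaded scan from isert_conn_setup_qp(). */
	struct comp_stub {
		int active_qps;
	};

	static int pick_least_loaded(const struct comp_stub *comps, int n)
	{
		int i, min = 0;

		for (i = 1; i < n; i++)
			if (comps[i].active_qps < comps[min].active_qps)
				min = i;
		return min;
	}

In the driver the scan runs under device_list_mutex, so the active_qps counters cannot change mid-selection.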
@@ -157,7 +160,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	return 0;
 err:
 	mutex_lock(&device_list_mutex);
-	device->cq_active_qps[min_index]--;
+	comp->active_qps--;
 	mutex_unlock(&device_list_mutex);
 
 	return ret;
@@ -243,9 +246,8 @@ static int
 isert_create_device_ib_res(struct isert_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
-	struct isert_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr;
-	int ret = 0, i, j;
+	int ret = 0, i;
 	int max_rx_cqe, max_tx_cqe;
 
 	dev_attr = &device->dev_attr;
@@ -272,55 +274,54 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->pi_capable = dev_attr->device_cap_flags &
 			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
 
-	device->cqs_used = min_t(int, num_online_cpus(),
-				 device->ib_device->num_comp_vectors);
-	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d pi_capable %d\n",
-		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg,
-		 device->pi_capable);
-	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
-				  device->cqs_used, GFP_KERNEL);
-	if (!device->cq_desc) {
-		pr_err("Unable to allocate device->cq_desc\n");
+	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+				 device->ib_device->num_comp_vectors));
+	pr_info("Using %d CQs, %s supports %d vectors support "
+		"Fast registration %d pi_capable %d\n",
+		device->comps_used, device->ib_device->name,
+		device->ib_device->num_comp_vectors, device->use_fastreg,
+		device->pi_capable);
+
+	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+				GFP_KERNEL);
+	if (!device->comps) {
+		pr_err("Unable to allocate completion contexts\n");
 		return -ENOMEM;
 	}
-	cq_desc = device->cq_desc;
-
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc[i].device = device;
-		cq_desc[i].cq_index = i;
-
-		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
-		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_rx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_rx_cqe, i);
-		if (IS_ERR(device->dev_rx_cq[i])) {
-			ret = PTR_ERR(device->dev_rx_cq[i]);
-			device->dev_rx_cq[i] = NULL;
+
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
+
+		comp->device = device;
+		INIT_WORK(&comp->rx_work, isert_cq_rx_work);
+		comp->rx_cq = ib_create_cq(device->ib_device,
+					   isert_cq_rx_callback,
+					   isert_cq_event_callback,
+					   (void *)comp,
+					   max_rx_cqe, i);
+		if (IS_ERR(comp->rx_cq)) {
+			ret = PTR_ERR(comp->rx_cq);
+			comp->rx_cq = NULL;
 			goto out_cq;
 		}
 
-		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
-		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_tx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_tx_cqe, i);
-		if (IS_ERR(device->dev_tx_cq[i])) {
-			ret = PTR_ERR(device->dev_tx_cq[i]);
-			device->dev_tx_cq[i] = NULL;
+		INIT_WORK(&comp->tx_work, isert_cq_tx_work);
+		comp->tx_cq = ib_create_cq(device->ib_device,
+					   isert_cq_tx_callback,
+					   isert_cq_event_callback,
+					   (void *)comp,
+					   max_tx_cqe, i);
+		if (IS_ERR(comp->tx_cq)) {
+			ret = PTR_ERR(comp->tx_cq);
+			comp->tx_cq = NULL;
 			goto out_cq;
 		}
 
-		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 
-		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 	}
@@ -328,19 +329,19 @@ isert_create_device_ib_res(struct isert_device *device)
 	return 0;
 
 out_cq:
-	for (j = 0; j < i; j++) {
-		cq_desc = &device->cq_desc[j];
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-		if (device->dev_rx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_rx_work);
-			ib_destroy_cq(device->dev_rx_cq[j]);
+		if (comp->rx_cq) {
+			cancel_work_sync(&comp->rx_work);
+			ib_destroy_cq(comp->rx_cq);
 		}
-		if (device->dev_tx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_tx_work);
-			ib_destroy_cq(device->dev_tx_cq[j]);
+		if (comp->tx_cq) {
+			cancel_work_sync(&comp->tx_work);
+			ib_destroy_cq(comp->tx_cq);
 		}
 	}
-	kfree(device->cq_desc);
+	kfree(device->comps);
 
 	return ret;
 }
@@ -348,21 +349,21 @@ out_cq:
 static void
 isert_free_device_ib_res(struct isert_device *device)
 {
-	struct isert_cq_desc *cq_desc;
 	int i;
 
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc = &device->cq_desc[i];
+	pr_info("device %p\n", device);
 
-		cancel_work_sync(&cq_desc->cq_rx_work);
-		cancel_work_sync(&cq_desc->cq_tx_work);
-		ib_destroy_cq(device->dev_rx_cq[i]);
-		ib_destroy_cq(device->dev_tx_cq[i]);
-		device->dev_rx_cq[i] = NULL;
-		device->dev_tx_cq[i] = NULL;
-	}
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-	kfree(device->cq_desc);
+		cancel_work_sync(&comp->rx_work);
+		cancel_work_sync(&comp->tx_work);
+		ib_destroy_cq(comp->rx_cq);
+		ib_destroy_cq(comp->tx_cq);
+		comp->rx_cq = NULL;
+		comp->tx_cq = NULL;
+	}
+	kfree(device->comps);
 }
 
 static void
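One ordering detail worth noting in isert_free_device_ib_res() above: each work item is flushed before its CQ is destroyed. Annotated excerpt (comments added here for explanation, not present in the patch):

	cancel_work_sync(&comp->rx_work);	/* wait out any queued polling work... */
	cancel_work_sync(&comp->tx_work);
	ib_destroy_cq(comp->rx_cq);		/* ...so the handlers cannot poll a freed CQ */
	ib_destroy_cq(comp->tx_cq);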
@@ -740,7 +741,6 @@ isert_connect_release(struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_device *device = isert_conn->conn_device;
-	int cq_index;
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -751,11 +751,11 @@ isert_connect_release(struct isert_conn *isert_conn)
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
 	if (isert_conn->conn_qp) {
-		cq_index = ((struct isert_cq_desc *)
-			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
-		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+
+		pr_debug("dec completion context %p active_qps\n", comp);
 		mutex_lock(&device_list_mutex);
-		isert_conn->conn_device->cq_active_qps[cq_index]--;
+		comp->active_qps--;
 		mutex_unlock(&device_list_mutex);
 
 		ib_destroy_qp(isert_conn->conn_qp);
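The release path above no longer stores a cq_index anywhere: it recovers the completion context from the QP itself, because the comp pointer was passed as the context argument to ib_create_cq() earlier and is therefore available as cq_context. A hypothetical helper showing the walk (not part of the patch):

	static struct isert_comp *qp_to_comp(struct ib_qp *qp)
	{
		/* The rx and tx CQs of one QP point at the same isert_comp
		 * here, so either cq_context would do; recv_cq matches the
		 * patch above.
		 */
		return qp->recv_cq->cq_context;
	}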
@@ -1524,7 +1524,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
 
 static void
 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
-		    unsigned long xfer_len)
+		    u32 xfer_len)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct iscsi_hdr *hdr;
@@ -2051,18 +2051,16 @@ isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx)
 static void
 isert_cq_tx_work(struct work_struct *work)
 {
-	struct isert_cq_desc *cq_desc = container_of(work,
-				struct isert_cq_desc, cq_tx_work);
-	struct isert_device *device = cq_desc->device;
-	int cq_index = cq_desc->cq_index;
-	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
+	struct isert_comp *comp = container_of(work, struct isert_comp,
+					       tx_work);
+	struct ib_cq *cq = comp->tx_cq;
 	struct isert_conn *isert_conn;
 	struct iser_tx_desc *tx_desc;
 	struct ib_wc wc;
 
-	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
-		tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;
+	while (ib_poll_cq(cq, 1, &wc) == 1) {
 		isert_conn = wc.qp->qp_context;
+		tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;
 
 		if (wc.status == IB_WC_SUCCESS) {
 			isert_send_completion(tx_desc, isert_conn);
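In the reworked isert_cq_tx_work() above, container_of() replaces the device/cq_index indirection: the workqueue core hands the handler a pointer to the embedded work_struct, and container_of() maps that back to the enclosing isert_comp. A minimal sketch of the idiom, assuming the struct isert_comp layout introduced by this patch:

	#include <linux/workqueue.h>

	static void tx_work_sketch(struct work_struct *work)
	{
		/* work points at comp->tx_work, so container_of() recovers comp */
		struct isert_comp *comp = container_of(work, struct isert_comp,
						       tx_work);

		/* comp->tx_cq is now reachable without a device/index pair */
		pr_debug("polling comp %p\n", comp);
	}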
@@ -2076,36 +2074,34 @@ isert_cq_tx_work(struct work_struct *work)
 		}
 	}
 
-	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
 
 static void
 isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
-	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+	struct isert_comp *comp = context;
 
-	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+	queue_work(isert_comp_wq, &comp->tx_work);
 }
 
 static void
 isert_cq_rx_work(struct work_struct *work)
 {
-	struct isert_cq_desc *cq_desc = container_of(work,
-			struct isert_cq_desc, cq_rx_work);
-	struct isert_device *device = cq_desc->device;
-	int cq_index = cq_desc->cq_index;
-	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
+	struct isert_comp *comp = container_of(work, struct isert_comp,
+					       rx_work);
+	struct ib_cq *cq = comp->rx_cq;
 	struct isert_conn *isert_conn;
 	struct iser_rx_desc *rx_desc;
 	struct ib_wc wc;
-	unsigned long xfer_len;
+	u32 xfer_len;
 
-	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
-		rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id;
+	while (ib_poll_cq(cq, 1, &wc) == 1) {
 		isert_conn = wc.qp->qp_context;
+		rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id;
 
 		if (wc.status == IB_WC_SUCCESS) {
-			xfer_len = (unsigned long)wc.byte_len;
+			xfer_len = wc.byte_len;
 			isert_rx_completion(rx_desc, isert_conn, xfer_len);
 		} else {
 			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
@@ -2118,15 +2114,15 @@ isert_cq_rx_work(struct work_struct *work)
 		}
 	}
 
-	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
 
 static void
 isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
-	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+	struct isert_comp *comp = context;
 
-	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+	queue_work(isert_rx_wq, &comp->rx_work);
 }
 
 static int
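Both work handlers now share the same drain-and-rearm shape: poll one work completion at a time until the CQ is empty, then re-request notification. A generic sketch of that shape, with a hypothetical handle_wc() standing in for the real RX/TX dispatch and error handling:

	static void handle_wc(struct ib_wc *wc);	/* hypothetical dispatch */

	static void drain_cq_sketch(struct ib_cq *cq)
	{
		struct ib_wc wc;

		/* consume everything currently queued... */
		while (ib_poll_cq(cq, 1, &wc) == 1)
			handle_wc(&wc);

		/* ...then arm an interrupt for the next completion */
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	}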
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index b3735a07ca47..3f93cb0a0f03 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -159,23 +159,33 @@ struct isert_conn {
 
 #define ISERT_MAX_CQ 64
 
-struct isert_cq_desc {
-	struct isert_device	*device;
-	int			cq_index;
-	struct work_struct	cq_rx_work;
-	struct work_struct	cq_tx_work;
+/**
+ * struct isert_comp - iSER completion context
+ *
+ * @device:     pointer to device handle
+ * @rx_cq:      RX completion queue
+ * @tx_cq:      TX completion queue
+ * @active_qps: Number of active QPs attached
+ *              to completion context
+ * @rx_work:    RX work handle
+ * @tx_work:    TX work handle
+ */
+struct isert_comp {
+	struct isert_device	*device;
+	struct ib_cq		*rx_cq;
+	struct ib_cq		*tx_cq;
+	int			 active_qps;
+	struct work_struct	 rx_work;
+	struct work_struct	 tx_work;
 };
 
 struct isert_device {
 	int			use_fastreg;
 	bool			pi_capable;
-	int			cqs_used;
 	int			refcount;
-	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
-	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
-	struct isert_cq_desc	*cq_desc;
+	struct isert_comp	*comps;
+	int			 comps_used;
 	struct list_head	dev_node;
 	struct ib_device_attr	dev_attr;
 	int			(*reg_rdma_mem)(struct iscsi_conn *conn,
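Taken together, the header change above replaces three parallel ISERT_MAX_CQ-sized arrays plus a separately allocated cq_desc array with a single kcalloc'd isert_comp array sized to what the device actually uses, keeping each vector's CQs, work items, and QP counter together. A hedged sketch of the resulting setup flow, mirroring isert_create_device_ib_res() with CQ creation and error handling elided (alloc_comps() is a hypothetical helper, not in the patch):

	static struct isert_comp *alloc_comps(struct isert_device *device, int n)
	{
		struct isert_comp *comps;
		int i;

		comps = kcalloc(n, sizeof(*comps), GFP_KERNEL);
		if (!comps)
			return NULL;

		for (i = 0; i < n; i++) {
			comps[i].device = device;
			INIT_WORK(&comps[i].rx_work, isert_cq_rx_work);
			INIT_WORK(&comps[i].tx_work, isert_cq_tx_work);
		}
		return comps;
	}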