author     Sagi Grimberg <sagig@mellanox.com>        2014-12-02 09:57:41 -0500
committer  Nicholas Bellinger <nab@linux-iscsi.org>  2014-12-13 02:32:30 -0500
commit     6f0fae3d7797172f5c30ada80e815122fdf55609 (patch)
tree       6fb3625df1c2a678bcfd300b442d0b1d05e9ec39 /drivers/infiniband
parent     4a295bae7e72d870354d337fe42360b0e4441e57 (diff)
iser-target: Use single CQ for TX and RX
Using TX and RX CQs attached to the same vector might create a
throttling effect coming from the serial processing of a work-queue.
Use a single CQ instead; it does better in interrupt processing and
results in simpler code. This also lets us drop the now-redundant
isert_rx_wq.

Next we can remove the atomic post_send_buf_count from the IO path.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 193
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h |  14
2 files changed, 83 insertions(+), 124 deletions(-)
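
The core of the change is easiest to see outside the diff: completion
handling collapses from two per-vector CQ/work pairs into one, with the
RX/TX split decided per work completion. Below is a minimal standalone
sketch of that dispatch shape in plain C; the types and names are
illustrative stand-ins, not the kernel verbs API.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the ib_wc fields the handler actually consults. */
enum wc_opcode { WC_RECV, WC_SEND };
enum wc_status { WC_SUCCESS, WC_ERROR };

struct wc {
	uint64_t wr_id;
	enum wc_opcode opcode;
	enum wc_status status;
	uint32_t byte_len;
};

/* One handler serves both directions: branch on the reported
 * opcode instead of keeping dedicated TX and RX queues. */
static void handle_wc(const struct wc *wc)
{
	if (wc->status != WC_SUCCESS) {
		printf("error completion, wr_id=0x%llx\n",
		       (unsigned long long)wc->wr_id);
		return;
	}
	if (wc->opcode == WC_RECV)
		printf("rx completion, %u bytes\n", wc->byte_len);
	else
		printf("tx completion, wr_id=0x%llx\n",
		       (unsigned long long)wc->wr_id);
}

int main(void)
{
	const struct wc wcs[] = {
		{ 0x1000, WC_RECV, WC_SUCCESS, 128 },
		{ 0x2000, WC_SEND, WC_SUCCESS, 0 },
		{ 0x3000, WC_SEND, WC_ERROR, 0 },
	};

	for (unsigned int i = 0; i < sizeof(wcs) / sizeof(wcs[0]); i++)
		handle_wc(&wcs[i]);
	return 0;
}
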
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d4a208381af8..0dc6287a50f0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -35,10 +35,10 @@
 #define ISERT_MAX_CONN		8
 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 #define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN)
 
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
 static struct workqueue_struct *isert_release_wq;
 
@@ -124,8 +124,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
 	attr.event_handler = isert_qp_event_callback;
 	attr.qp_context = isert_conn;
-	attr.send_cq = comp->tx_cq;
-	attr.recv_cq = comp->rx_cq;
+	attr.send_cq = comp->cq;
+	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
 	/*
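
Pointing send_cq and recv_cq at the same CQ is ordinary verbs usage.
For readers more familiar with the userspace API, a rough libibverbs
analogue might look like the following (illustrative work-request
budgets, error handling elided):

#include <infiniband/verbs.h>

struct ibv_qp *create_qp_single_cq(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr = {
		.send_cq = cq,		/* same CQ for both directions */
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,
		.cap = {
			.max_send_wr  = 128,	/* illustrative budgets */
			.max_recv_wr  = 128,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	return ibv_create_qp(pd, &attr);
}
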
@@ -237,10 +237,8 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
 
 static int
 isert_create_device_ib_res(struct isert_device *device)
@@ -248,15 +246,14 @@ isert_create_device_ib_res(struct isert_device *device)
 	struct ib_device *ib_dev = device->ib_device;
 	struct ib_device_attr *dev_attr;
 	int ret = 0, i;
-	int max_rx_cqe, max_tx_cqe;
+	int max_cqe;
 
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(ib_dev, dev_attr);
 	if (ret)
 		return ret;
 
-	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
-	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -293,35 +290,19 @@ isert_create_device_ib_res(struct isert_device *device)
 		struct isert_comp *comp = &device->comps[i];
 
 		comp->device = device;
-		INIT_WORK(&comp->rx_work, isert_cq_rx_work);
-		comp->rx_cq = ib_create_cq(device->ib_device,
-					   isert_cq_rx_callback,
-					   isert_cq_event_callback,
-					   (void *)comp,
-					   max_rx_cqe, i);
-		if (IS_ERR(comp->rx_cq)) {
-			ret = PTR_ERR(comp->rx_cq);
-			comp->rx_cq = NULL;
-			goto out_cq;
-		}
-
-		INIT_WORK(&comp->tx_work, isert_cq_tx_work);
-		comp->tx_cq = ib_create_cq(device->ib_device,
-					   isert_cq_tx_callback,
-					   isert_cq_event_callback,
-					   (void *)comp,
-					   max_tx_cqe, i);
-		if (IS_ERR(comp->tx_cq)) {
-			ret = PTR_ERR(comp->tx_cq);
-			comp->tx_cq = NULL;
-			goto out_cq;
-		}
-
-		ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP);
-		if (ret)
-			goto out_cq;
-
-		ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP);
+		INIT_WORK(&comp->work, isert_cq_work);
+		comp->cq = ib_create_cq(device->ib_device,
+					isert_cq_callback,
+					isert_cq_event_callback,
+					(void *)comp,
+					max_cqe, i);
+		if (IS_ERR(comp->cq)) {
+			ret = PTR_ERR(comp->cq);
+			comp->cq = NULL;
+			goto out_cq;
+		}
+
+		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 	}
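
The create-then-arm sequence above also has a direct userspace
counterpart; a sketch with libibverbs follows, where the comp_vector
argument plays the same role as the loop index i here:

#include <infiniband/verbs.h>

/* Userspace analogue of create-then-arm. Passing 0 as the second
 * argument to ibv_req_notify_cq requests an event for any completion,
 * not only solicited ones. */
struct ibv_cq *create_armed_cq(struct ibv_context *ctx, int cqe,
			       void *user_ctx, int comp_vector)
{
	struct ibv_cq *cq;

	cq = ibv_create_cq(ctx, cqe, user_ctx, NULL, comp_vector);
	if (!cq)
		return NULL;

	if (ibv_req_notify_cq(cq, 0)) {
		ibv_destroy_cq(cq);
		return NULL;
	}
	return cq;
}
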
@@ -332,13 +313,9 @@ out_cq:
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		if (comp->rx_cq) {
-			cancel_work_sync(&comp->rx_work);
-			ib_destroy_cq(comp->rx_cq);
-		}
-		if (comp->tx_cq) {
-			cancel_work_sync(&comp->tx_work);
-			ib_destroy_cq(comp->tx_cq);
+		if (comp->cq) {
+			cancel_work_sync(&comp->work);
+			ib_destroy_cq(comp->cq);
 		}
 	}
 	kfree(device->comps);
@@ -356,12 +333,9 @@ isert_free_device_ib_res(struct isert_device *device)
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		cancel_work_sync(&comp->rx_work);
-		cancel_work_sync(&comp->tx_work);
-		ib_destroy_cq(comp->rx_cq);
-		ib_destroy_cq(comp->tx_cq);
-		comp->rx_cq = NULL;
-		comp->tx_cq = NULL;
+		cancel_work_sync(&comp->work);
+		ib_destroy_cq(comp->cq);
+		comp->cq = NULL;
 	}
 	kfree(device->comps);
 }
@@ -2013,14 +1987,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 	}
 }
 
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ *     is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on wc opcode in FLUSH errors
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so
+ * it is an RX descriptor, otherwize it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
+{
+	void *start = isert_conn->conn_rx_descs;
+	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
+
+	if (wr_id >= start && wr_id < start + len)
+		return false;
+
+	return true;
+}
+
 static void
-isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
 {
-	if (tx) {
+	if (is_isert_tx_desc(isert_conn, (void *)wc->wr_id)) {
 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 		struct isert_cmd *isert_cmd;
+		struct iser_tx_desc *desc;
 
-		isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd;
+		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+		isert_cmd = desc->isert_cmd;
 		if (!isert_cmd)
 			isert_unmap_tx_desc(desc, ib_dev);
 		else
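
The wr_id classification above leans on one invariant: every RX wr_id
is the address of an entry in the connection's contiguous rx_descs
array, so a simple range check separates the two kinds. A standalone
illustration of the same trick (stand-in types; like the kernel code,
it assumes every non-RX wr_id really is a TX descriptor):

#include <stdbool.h>
#include <stddef.h>

#define MAX_RECV_DTOS 64

struct rx_desc { char buf[512]; };

/* The whole receive ring lives in one contiguous allocation. */
static struct rx_desc rx_descs[MAX_RECV_DTOS];

/* An address inside rx_descs is an RX descriptor; anything else
 * is treated as TX. */
static bool is_tx_desc(const void *wr_id)
{
	const char *start = (const char *)rx_descs;
	const char *end = start + sizeof(rx_descs);
	const char *p = (const char *)wr_id;

	return !(p >= start && p < end);
}
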
@@ -2049,80 +2048,52 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
 	}
 }
 
 static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
 {
-	struct isert_comp *comp = container_of(work, struct isert_comp,
-					       tx_work);
-	struct ib_cq *cq = comp->tx_cq;
 	struct isert_conn *isert_conn;
 	struct iser_tx_desc *tx_desc;
-	struct ib_wc wc;
-
-	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		isert_conn = wc.qp->qp_context;
-		tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;
+	struct iser_rx_desc *rx_desc;
 
-		if (wc.status == IB_WC_SUCCESS) {
-			isert_send_completion(tx_desc, isert_conn);
+	isert_conn = wc->qp->qp_context;
+	if (likely(wc->status == IB_WC_SUCCESS)) {
+		if (wc->opcode == IB_WC_RECV) {
+			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+			isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
 		} else {
-			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			pr_debug("TX wc.status: 0x%08x\n", wc.status);
-			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
-			if (wc.wr_id != ISER_FASTREG_LI_WRID)
-				isert_cq_comp_err(tx_desc, isert_conn, true);
+			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+			isert_send_completion(tx_desc, isert_conn);
 		}
-	}
-
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
-	struct isert_comp *comp = context;
+	} else {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			pr_err("wr id %llx status %d vend_err %x\n",
+			       wc->wr_id, wc->status, wc->vendor_err);
+		else
+			pr_debug("flush error: wr id %llx\n", wc->wr_id);
 
-	queue_work(isert_comp_wq, &comp->tx_work);
+		if (wc->wr_id != ISER_FASTREG_LI_WRID)
+			isert_cq_comp_err(isert_conn, wc);
+	}
 }
 
 static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
 {
 	struct isert_comp *comp = container_of(work, struct isert_comp,
-					       rx_work);
-	struct ib_cq *cq = comp->rx_cq;
-	struct isert_conn *isert_conn;
-	struct iser_rx_desc *rx_desc;
+					       work);
 	struct ib_wc wc;
-	u32 xfer_len;
-
-	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		isert_conn = wc.qp->qp_context;
-		rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id;
-
-		if (wc.status == IB_WC_SUCCESS) {
-			xfer_len = wc.byte_len;
-			isert_rx_completion(rx_desc, isert_conn, xfer_len);
-		} else {
-			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			if (wc.status != IB_WC_WR_FLUSH_ERR) {
-				pr_debug("RX wc.status: 0x%08x\n", wc.status);
-				pr_debug("RX wc.vendor_err: 0x%08x\n",
-					 wc.vendor_err);
-			}
-			isert_cq_comp_err(rx_desc, isert_conn, false);
-		}
-	}
 
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+	while (ib_poll_cq(comp->cq, 1, &wc) == 1)
+		isert_handle_wc(&wc);
+
+	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 }
 
 static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_comp *comp = context;
 
-	queue_work(isert_rx_wq, &comp->rx_work);
+	queue_work(isert_comp_wq, &comp->work);
 }
 
 static int
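
One subtlety in isert_cq_work: a completion that lands between the
final ib_poll_cq and the ib_req_notify_cq re-arm raises no event and
waits for the next one. The kernel verbs API provides
IB_CQ_REPORT_MISSED_EVENTS to detect that window; a sketch of the
drain-and-rearm variant some ULPs use (not what this patch does):

/* Variant drain loop: re-arm, then re-poll if ib_req_notify_cq
 * reports that completions slipped in while the CQ was unarmed.
 * Sketch only; the patch above re-arms unconditionally instead. */
static void isert_cq_drain(struct isert_comp *comp)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(comp->cq, 1, &wc) == 1)
			isert_handle_wc(&wc);
	} while (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
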
@@ -3363,17 +3334,11 @@ static int __init isert_init(void)
 {
 	int ret;
 
-	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
-	if (!isert_rx_wq) {
-		pr_err("Unable to allocate isert_rx_wq\n");
-		return -ENOMEM;
-	}
-
 	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
 	if (!isert_comp_wq) {
 		pr_err("Unable to allocate isert_comp_wq\n");
 		ret = -ENOMEM;
-		goto destroy_rx_wq;
+		return -ENOMEM;
 	}
 
 	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
@@ -3391,8 +3356,7 @@ static int __init isert_init(void)
 
 destroy_comp_wq:
 	destroy_workqueue(isert_comp_wq);
-destroy_rx_wq:
-	destroy_workqueue(isert_rx_wq);
+
 	return ret;
 }
 
@@ -3401,7 +3365,6 @@ static void __exit isert_exit(void)
 	flush_scheduled_work();
 	destroy_workqueue(isert_release_wq);
 	destroy_workqueue(isert_comp_wq);
-	destroy_workqueue(isert_rx_wq);
 	iscsit_unregister_transport(&iser_target_transport);
 	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
 }
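
With isert_rx_wq gone, all completion work funnels through
isert_comp_wq, which keeps its default bound, normal-priority
semantics. If completion latency ever became a concern, the workqueue
flags offer obvious knobs; a purely hypothetical variant, not part of
this patch:

/* Hypothetical alternative allocation; the patch keeps
 * alloc_workqueue("isert_comp_wq", 0, 0) as-is. */
isert_comp_wq = alloc_workqueue("isert_comp_wq",
				WQ_UNBOUND | WQ_HIGHPRI,
				WQ_UNBOUND_MAX_ACTIVE);
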
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 3f93cb0a0f03..5c1a31e8df70 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -163,20 +163,16 @@ struct isert_conn {
  * struct isert_comp - iSER completion context
  *
  * @device:     pointer to device handle
- * @rx_cq:      RX completion queue
- * @tx_cq:      TX completion queue
+ * @cq:         completion queue
  * @active_qps: Number of active QPs attached
  *              to completion context
- * @rx_work:    RX work handle
- * @tx_work:    TX work handle
+ * @work:       completion work handle
  */
 struct isert_comp {
 	struct isert_device     *device;
-	struct ib_cq		*rx_cq;
-	struct ib_cq		*tx_cq;
+	struct ib_cq		*cq;
 	int                      active_qps;
-	struct work_struct	 rx_work;
-	struct work_struct	 tx_work;
+	struct work_struct	 work;
 };
 
 struct isert_device {