author     Santosh Shilimkar <santosh.shilimkar@oracle.com>  2016-07-04 19:16:36 -0400
committer  Santosh Shilimkar <santosh.shilimkar@oracle.com>  2017-01-02 17:02:52 -0500
commit     be2f76eacc278c272f26d46e4168efe5a55f5383 (patch)
tree       c5d8d170f37ff0704d3a87b55e493ea794f8e841 /net/rds
parent     09b2b8f52895addd9bf28dc5ac98ff5cc750cf9a (diff)
RDS: IB: Add vector spreading for cqs
Based on the available device vectors, allocate CQs accordingly to get a
better spread of completion vectors, which helps performance a great deal.
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/ib.c     11
-rw-r--r--  net/rds/ib.h      5
-rw-r--r--  net/rds/ib_cm.c  40
3 files changed, 53 insertions(+), 3 deletions(-)
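Each completion vector typically corresponds to a separate device interrupt
(often an MSI-X line), so spreading CQs across vectors spreads completion
interrupts across CPUs instead of funneling them all through one vector. For
orientation, here is a hedged user-space analogue of the same idea using
libibverbs; the round-robin choice is purely illustrative, since the patch
below picks the least-loaded vector instead:

/* User-space analogue of CQ vector spreading using libibverbs.
 * The round-robin choice below is illustrative only; the patch picks
 * the least-loaded vector instead. Build: cc spread.c -libverbs
 */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_cq *cq[4] = { NULL };
	int i;

	if (!devs || !devs[0])
		return 1;
	ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;

	for (i = 0; i < 4; i++) {
		/* Spread CQs across the device's completion vectors. */
		int vec = i % ctx->num_comp_vectors;

		cq[i] = ibv_create_cq(ctx, 256, NULL, NULL, vec);
		if (cq[i])
			printf("cq %d on comp_vector %d\n", i, vec);
	}

	for (i = 0; i < 4; i++)
		if (cq[i])
			ibv_destroy_cq(cq[i]);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}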
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 5680d90b0b77..8d70884d7bb6 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -111,6 +111,9 @@ static void rds_ib_dev_free(struct work_struct *work)
 		kfree(i_ipaddr);
 	}
 
+	if (rds_ibdev->vector_load)
+		kfree(rds_ibdev->vector_load);
+
 	kfree(rds_ibdev);
 }
 
@@ -159,6 +162,14 @@ static void rds_ib_add_one(struct ib_device *device)
 	rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
 	rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
 
+	rds_ibdev->vector_load = kzalloc(sizeof(int) * device->num_comp_vectors,
+					 GFP_KERNEL);
+	if (!rds_ibdev->vector_load) {
+		pr_err("RDS/IB: %s failed to allocate vector memory\n",
+			__func__);
+		goto put_dev;
+	}
+
 	rds_ibdev->dev = device;
 	rds_ibdev->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(rds_ibdev->pd)) {
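Two small points on this hunk: kfree() is a no-op on a NULL pointer, so the
if (rds_ibdev->vector_load) guard in rds_ib_dev_free() above is harmless but
redundant, and kcalloc() is the conventional helper for a zeroed array sized
by a multiplication, since it checks the multiply for overflow. A sketch of
that equivalent allocation (not the form the patch merged):

	/* Sketch: the idiomatic equivalent using kcalloc(), which zeroes
	 * the array and rejects an overflowing count * size. Not the
	 * form the patch merged.
	 */
	rds_ibdev->vector_load = kcalloc(device->num_comp_vectors,
					 sizeof(*rds_ibdev->vector_load),
					 GFP_KERNEL);
	if (!rds_ibdev->vector_load)
		goto put_dev;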
diff --git a/net/rds/ib.h b/net/rds/ib.h
index c62e5513d306..1fe9f79fead5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -185,6 +185,10 @@ struct rds_ib_connection {
 
 	/* Endpoint role in connection */
 	bool i_active_side;
+
+	/* Send/Recv vectors */
+	int i_scq_vector;
+	int i_rcq_vector;
 };
 
 /* This assumes that atomic_t is at least 32 bits */
@@ -227,6 +231,7 @@ struct rds_ib_device {
 	spinlock_t spinlock; /* protect the above */
 	atomic_t refcount;
 	struct work_struct free_work;
+	int *vector_load;
 };
 
 #define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 4d1bf04b06b5..33c8584ada1f 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -358,6 +358,28 @@ static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
 	tasklet_schedule(&ic->i_send_tasklet);
 }
 
+static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
+{
+	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
+	int index = rds_ibdev->dev->num_comp_vectors - 1;
+	int i;
+
+	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
+		if (rds_ibdev->vector_load[i] < min) {
+			index = i;
+			min = rds_ibdev->vector_load[i];
+		}
+	}
+
+	rds_ibdev->vector_load[index]++;
+	return index;
+}
+
+static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
+{
+	rds_ibdev->vector_load[index]--;
+}
+
 /*
  * This needs to be very careful to not leave IS_ERR pointers around for
  * cleanup to trip over.
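ibdev_get_unused_vector() is a linear argmin scan: it walks from the highest
vector index down and keeps the first minimum it sees, so ties resolve toward
the highest index and a fresh device fills vectors from num_comp_vectors - 1
downward; ibdev_put_vector() simply undoes the accounting. (The helpers
themselves take no lock.) A minimal user-space rendering of the same
bookkeeping, where NUM_VECTORS and the function names are illustrative
stand-ins for the per-device kernel state:

/* User-space rendering of the patch's least-loaded vector bookkeeping.
 * NUM_VECTORS and the function names are illustrative stand-ins; the
 * kernel keeps vector_load[] per struct rds_ib_device.
 */
#include <stdio.h>

#define NUM_VECTORS 8	/* stand-in for dev->num_comp_vectors */

static int vector_load[NUM_VECTORS];	/* CQs currently on each vector */

static int get_unused_vector(void)
{
	int index = NUM_VECTORS - 1;
	int min = vector_load[index];
	int i;

	/* Scan high to low; keep the first minimum found. */
	for (i = NUM_VECTORS - 1; i >= 0; i--) {
		if (vector_load[i] < min) {
			index = i;
			min = vector_load[i];
		}
	}
	vector_load[index]++;
	return index;
}

static void put_vector(int index)
{
	vector_load[index]--;
}

int main(void)
{
	int i;

	/* Eight picks land on eight distinct vectors, highest index first. */
	for (i = 0; i < NUM_VECTORS; i++)
		printf("cq %d -> vector %d\n", i, get_unused_vector());

	/* Freeing one vector makes it the unique minimum, so it is
	 * chosen on the next pick.
	 */
	put_vector(3);
	printf("next cq -> vector %d\n", get_unused_vector());
	return 0;
}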
@@ -399,25 +421,30 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	/* Protection domain and memory range */
 	ic->i_pd = rds_ibdev->pd;
 
+	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
 	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
-
+	cq_attr.comp_vector = ic->i_scq_vector;
 	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_send_cq)) {
 		ret = PTR_ERR(ic->i_send_cq);
 		ic->i_send_cq = NULL;
+		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
 		rdsdebug("ib_create_cq send failed: %d\n", ret);
 		goto out;
 	}
 
+	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
 	cq_attr.cqe = ic->i_recv_ring.w_nr;
+	cq_attr.comp_vector = ic->i_rcq_vector;
 	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_recv_cq)) {
 		ret = PTR_ERR(ic->i_recv_cq);
 		ic->i_recv_cq = NULL;
+		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
 		rdsdebug("ib_create_cq recv failed: %d\n", ret);
 		goto out;
 	}
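The setup path now reserves a vector, points the CQ's struct
ib_cq_init_attr::comp_vector at it, and returns the vector if CQ creation
fails. Condensed, the pattern looks like this (a sketch only: nr_cqe,
comp_handler, event_handler, and ctx are placeholder names, and error
handling beyond the CQ itself is elided):

	/* Sketch: reserve a vector, aim the CQ's interrupts at it, and
	 * give the vector back if CQ creation fails. Placeholder names;
	 * error paths beyond the CQ itself are elided.
	 */
	struct ib_cq_init_attr cq_attr = {};
	struct ib_cq *cq;
	int vec;

	vec = ibdev_get_unused_vector(rds_ibdev);	/* charge a vector */
	cq_attr.cqe = nr_cqe;				/* ring depth */
	cq_attr.comp_vector = vec;			/* interrupt vector */
	cq = ib_create_cq(dev, comp_handler, event_handler, ctx, &cq_attr);
	if (IS_ERR(cq)) {
		ibdev_put_vector(rds_ibdev, vec);	/* undo accounting */
		return PTR_ERR(cq);
	}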
@@ -780,10 +807,17 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		/* first destroy the ib state that generates callbacks */
 		if (ic->i_cm_id->qp)
 			rdma_destroy_qp(ic->i_cm_id);
-		if (ic->i_send_cq)
+		if (ic->i_send_cq) {
+			if (ic->rds_ibdev)
+				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
 			ib_destroy_cq(ic->i_send_cq);
-		if (ic->i_recv_cq)
+		}
+
+		if (ic->i_recv_cq) {
+			if (ic->rds_ibdev)
+				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
 			ib_destroy_cq(ic->i_recv_cq);
+		}
 
 		/* then free the resources that ib callbacks use */
 		if (ic->i_send_hdrs)