author     santosh.shilimkar@oracle.com <santosh.shilimkar@oracle.com>  2015-08-25 15:02:01 -0400
committer  David S. Miller <davem@davemloft.net>                        2015-08-25 19:28:11 -0400
commit     ad1d7dc0d79d3dd2c5d2931b13edbd4fe33e5fac
tree       6ba3d4095461f37e5d8876bfca6735ab47640920
parent     6116c2030fff91950f68b7fffb5959c91a05aaf6
RDS: push FMR pool flush work to its own worker
The RDS FMR flush operation races with connect/reconnect, which happens
a lot with RDS. FMR flush being on the common rds_wq aggravates the
problem. Let's push the RDS FMR pool flush work to its own worker.
Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
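
The pattern, reduced to its essentials: delayed work that used to ride on the shared system workqueue via schedule_delayed_work() is queued on a private workqueue instead, so a slow flush only backs up its own queue. Below is a minimal sketch of that pattern, not RDS code; every name in it (demo_wq, demo_flush_work, "demo_flushd") is invented for illustration.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

/* Stand-in for the potentially slow FMR pool flush. */
static void demo_flush(struct work_struct *work)
{
}
static DECLARE_DELAYED_WORK(demo_flush_work, demo_flush);

static int __init demo_init(void)
{
        /* One dedicated queue, created once at init time. */
        demo_wq = create_workqueue("demo_flushd");
        if (!demo_wq)
                return -ENOMEM;

        /* Before: schedule_delayed_work(&demo_flush_work, 10),
         * which shares the kernel-wide workers with everyone else.
         * After: the same delayed work, but on the private queue.
         */
        queue_delayed_work(demo_wq, &demo_flush_work, 10);
        return 0;
}

static void __exit demo_exit(void)
{
        /* Disarm any pending timer, then drain and free the queue. */
        cancel_delayed_work_sync(&demo_flush_work);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```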
 net/rds/ib.c      |  9
 net/rds/ib.h      |  2
 net/rds/ib_rdma.c | 27
 3 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 13814227b3b2..d020fade312c 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -366,6 +366,7 @@ void rds_ib_exit(void)
         rds_ib_sysctl_exit();
         rds_ib_recv_exit();
         rds_trans_unregister(&rds_ib_transport);
+        rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -401,10 +402,14 @@ int rds_ib_init(void)
 
         INIT_LIST_HEAD(&rds_ib_devices);
 
-        ret = ib_register_client(&rds_ib_client);
+        ret = rds_ib_fmr_init();
         if (ret)
                 goto out;
 
+        ret = ib_register_client(&rds_ib_client);
+        if (ret)
+                goto out_fmr_exit;
+
         ret = rds_ib_sysctl_init();
         if (ret)
                 goto out_ibreg;
@@ -427,6 +432,8 @@ out_sysctl:
         rds_ib_sysctl_exit();
 out_ibreg:
         rds_ib_unregister_client();
+out_fmr_exit:
+        rds_ib_fmr_exit();
 out:
         return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 6422c52682e5..9fc95e38659a 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -313,6 +313,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a275b7d205ef..2ac78c9879ea 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -83,6 +83,25 @@ struct rds_ib_mr_pool {
         struct ib_fmr_attr      fmr_attr;
 };
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int rds_ib_fmr_init(void)
+{
+        rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+        if (!rds_ib_fmr_wq)
+                return -ENOMEM;
+        return 0;
+}
+
+/* By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+        destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -719,15 +738,17 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
         /* If we've pinned too many pages, request a flush */
         if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
             atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-                schedule_delayed_work(&pool->flush_worker, 10);
+                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
         if (invalidate) {
                 if (likely(!in_interrupt())) {
                         rds_ib_flush_mr_pool(pool, 0, NULL);
                 } else {
                         /* We get here if the user created a MR marked
-                         * as use_once and invalidate at the same time. */
-                        schedule_delayed_work(&pool->flush_worker, 10);
+                         * as use_once and invalidate at the same time.
+                         */
+                        queue_delayed_work(rds_ib_fmr_wq,
+                                           &pool->flush_worker, 10);
                 }
         }
 
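
The two queue_delayed_work() calls above are the functional core of the patch: flush requests now land on rds_ib_fmr_wq instead of the system-wide queue behind schedule_delayed_work(), so a long FMR flush can no longer sit behind, or hold up, the connection-management work RDS queues on the common rds_wq. One aside on how the queue is built (a sketch for context, not part of the patch): create_workqueue() is the legacy wrapper, and on kernels of this era it expands to roughly the alloc_workqueue() call below.

```c
#include <linux/workqueue.h>

static struct workqueue_struct *rds_ib_fmr_wq;

/* Roughly what create_workqueue("rds_fmr_flushd") amounts to:
 * WQ_MEM_RECLAIM guarantees a rescuer thread, which matters for a
 * work item whose whole job is freeing memory, and max_active caps
 * the queue at one in-flight work item per CPU.
 */
static int rds_ib_fmr_wq_create(void)
{
        rds_ib_fmr_wq = alloc_workqueue("rds_fmr_flushd",
                                        WQ_MEM_RECLAIM, 1);
        if (!rds_ib_fmr_wq)
                return -ENOMEM;
        return 0;
}
```

The matching invariant sits in the exit path: per the comment added in ib_rdma.c, every pool free has already waited on its flush work by the time rds_ib_fmr_exit() runs, which is what makes the bare destroy_workqueue() safe.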