-rw-r--r--	net/rds/ib.c	9
-rw-r--r--	net/rds/ib.h	2
-rw-r--r--	net/rds/ib_rdma.c	27
3 files changed, 34 insertions, 4 deletions
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 13814227b3b2..d020fade312c 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -366,6 +366,7 @@ void rds_ib_exit(void)
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
+	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -401,10 +402,14 @@ int rds_ib_init(void)
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = ib_register_client(&rds_ib_client);
+	ret = rds_ib_fmr_init();
 	if (ret)
 		goto out;
 
+	ret = ib_register_client(&rds_ib_client);
+	if (ret)
+		goto out_fmr_exit;
+
 	ret = rds_ib_sysctl_init();
 	if (ret)
 		goto out_ibreg;
@@ -427,6 +432,8 @@ out_sysctl:
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
+out_fmr_exit:
+	rds_ib_fmr_exit();
 out:
 	return ret;
 }
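
The init-path change above follows the usual staged-init pattern: rds_ib_fmr_init() now runs first, and the new out_fmr_exit label unwinds it if any later step fails. A minimal userspace C sketch of that pattern only (hypothetical stub functions, not RDS code):

/* Sketch: stub init steps standing in for rds_ib_fmr_init() and
 * ib_register_client(); a failure in a later step unwinds the earlier
 * ones in reverse order via gotos. */
#include <stdio.h>

static int fmr_init(void) { puts("fmr init"); return 0; }
static void fmr_exit(void) { puts("fmr exit"); }
static int register_client(void) { puts("register client"); return -1; /* force failure */ }

static int example_init(void)
{
	int ret;

	ret = fmr_init();		/* earliest step */
	if (ret)
		goto out;

	ret = register_client();	/* later step: must undo fmr_init() on failure */
	if (ret)
		goto out_fmr_exit;

	return 0;

out_fmr_exit:
	fmr_exit();
out:
	return ret;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
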
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 6422c52682e5..9fc95e38659a 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -313,6 +313,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a275b7d205ef..2ac78c9879ea 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -83,6 +83,25 @@ struct rds_ib_mr_pool {
 	struct ib_fmr_attr	fmr_attr;
 };
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int rds_ib_fmr_init(void)
+{
+	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+	if (!rds_ib_fmr_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/* By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+	destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -719,15 +738,17 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		schedule_delayed_work(&pool->flush_worker, 10);
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
 			rds_ib_flush_mr_pool(pool, 0, NULL);
 		} else {
 			/* We get here if the user created a MR marked
-			 * as use_once and invalidate at the same time. */
-			schedule_delayed_work(&pool->flush_worker, 10);
+			 * as use_once and invalidate at the same time.
+			 */
+			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
 		}
 	}
 
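
The ib_rdma.c hunks move the pool flush work off the shared kernel workqueue and onto a queue dedicated to FMR flushing. A minimal kernel-module sketch of that lifecycle (hypothetical example_* names, not RDS code): create the workqueue at init, queue delayed work onto it, and destroy it at exit.

/* Sketch of the dedicated-workqueue pattern adopted by the patch:
 * created at init, delayed work queued onto it instead of the shared
 * kernel workqueue, destroyed at exit. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_flush;

static void example_flush_worker(struct work_struct *work)
{
	pr_info("flush work ran on the dedicated queue\n");
}

static int __init example_init(void)
{
	example_wq = create_workqueue("example_flushd");
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_flush, example_flush_worker);
	/* ~10 jiffies, matching the delay used in rds_ib_free_mr() above */
	queue_delayed_work(example_wq, &example_flush, 10);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_flush);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that queue_delayed_work() returns 0 if the work is already pending, so repeated flush requests from rds_ib_free_mr() collapse into a single queued flush rather than piling up.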