path: root/net/rds/ib_rdma.c
Diffstat (limited to 'net/rds/ib_rdma.c')
-rw-r--r--  net/rds/ib_rdma.c  297
1 file changed, 226 insertions, 71 deletions
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a54cd63f9e35..819c35a0d9cb 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -32,11 +32,14 @@
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/rculist.h>
 
 #include "rds.h"
-#include "rdma.h"
 #include "ib.h"
+#include "xlist.h"
 
+static DEFINE_PER_CPU(unsigned long, clean_list_grace);
+#define CLEAN_LIST_BUSY_BIT 0
 
 /*
  * This is stored as mr->r_trans_private.
@@ -45,7 +48,11 @@ struct rds_ib_mr {
 	struct rds_ib_device *device;
 	struct rds_ib_mr_pool *pool;
 	struct ib_fmr *fmr;
-	struct list_head list;
+
+	struct xlist_head xlist;
+
+	/* unmap_list is for freeing */
+	struct list_head unmap_list;
 	unsigned int remap_count;
 
 	struct scatterlist *sg;
@@ -59,14 +66,16 @@ struct rds_ib_mr {
  */
 struct rds_ib_mr_pool {
 	struct mutex flush_lock;		/* serialize fmr invalidate */
-	struct work_struct flush_worker;	/* flush worker */
+	struct delayed_work flush_worker;	/* flush worker */
 
-	spinlock_t list_lock;			/* protect variables below */
 	atomic_t item_count;			/* total # of MRs */
 	atomic_t dirty_count;			/* # dirty of MRs */
-	struct list_head drop_list;		/* MRs that have reached their max_maps limit */
-	struct list_head free_list;		/* unused MRs */
-	struct list_head clean_list;		/* unused & unamapped MRs */
+
+	struct xlist_head drop_list;		/* MRs that have reached their max_maps limit */
+	struct xlist_head free_list;		/* unused MRs */
+	struct xlist_head clean_list;		/* global unused & unamapped MRs */
+	wait_queue_head_t flush_wait;
+
 	atomic_t free_pinned;			/* memory pinned by free MRs */
 	unsigned long max_items;
 	unsigned long max_items_soft;
@@ -74,7 +83,7 @@ struct rds_ib_mr_pool {
 	struct ib_fmr_attr fmr_attr;
 };
 
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
 
@@ -83,16 +92,17 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
 	struct rds_ib_device *rds_ibdev;
 	struct rds_ib_ipaddr *i_ipaddr;
 
-	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
-		spin_lock_irq(&rds_ibdev->spinlock);
-		list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
+		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
 			if (i_ipaddr->ipaddr == ipaddr) {
-				spin_unlock_irq(&rds_ibdev->spinlock);
+				atomic_inc(&rds_ibdev->refcount);
+				rcu_read_unlock();
 				return rds_ibdev;
 			}
 		}
-		spin_unlock_irq(&rds_ibdev->spinlock);
 	}
+	rcu_read_unlock();
 
 	return NULL;
 }
@@ -108,7 +118,7 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
 	i_ipaddr->ipaddr = ipaddr;
 
 	spin_lock_irq(&rds_ibdev->spinlock);
-	list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
+	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
 	spin_unlock_irq(&rds_ibdev->spinlock);
 
 	return 0;
@@ -116,17 +126,24 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
 
 static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
 {
-	struct rds_ib_ipaddr *i_ipaddr, *next;
+	struct rds_ib_ipaddr *i_ipaddr;
+	struct rds_ib_ipaddr *to_free = NULL;
+
 
 	spin_lock_irq(&rds_ibdev->spinlock);
-	list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
+	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
 		if (i_ipaddr->ipaddr == ipaddr) {
-			list_del(&i_ipaddr->list);
-			kfree(i_ipaddr);
+			list_del_rcu(&i_ipaddr->list);
+			to_free = i_ipaddr;
 			break;
 		}
 	}
 	spin_unlock_irq(&rds_ibdev->spinlock);
+
+	if (to_free) {
+		synchronize_rcu();
+		kfree(to_free);
+	}
 }
 
 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
@@ -134,8 +151,10 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
 	struct rds_ib_device *rds_ibdev_old;
 
 	rds_ibdev_old = rds_ib_get_device(ipaddr);
-	if (rds_ibdev_old)
+	if (rds_ibdev_old) {
 		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
+		rds_ib_dev_put(rds_ibdev_old);
+	}
 
 	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
 }
@@ -150,12 +169,13 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
 	BUG_ON(list_empty(&ic->ib_node));
 	list_del(&ic->ib_node);
 
-	spin_lock_irq(&rds_ibdev->spinlock);
+	spin_lock(&rds_ibdev->spinlock);
 	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
-	spin_unlock_irq(&rds_ibdev->spinlock);
+	spin_unlock(&rds_ibdev->spinlock);
 	spin_unlock_irq(&ib_nodev_conns_lock);
 
 	ic->rds_ibdev = rds_ibdev;
+	atomic_inc(&rds_ibdev->refcount);
 }
 
 void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -175,18 +195,18 @@ void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *
 	spin_unlock(&ib_nodev_conns_lock);
 
 	ic->rds_ibdev = NULL;
+	rds_ib_dev_put(rds_ibdev);
 }
 
-void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
+void rds_ib_destroy_nodev_conns(void)
 {
 	struct rds_ib_connection *ic, *_ic;
 	LIST_HEAD(tmp_list);
 
 	/* avoid calling conn_destroy with irqs off */
-	spin_lock_irq(list_lock);
-	list_splice(list, &tmp_list);
-	INIT_LIST_HEAD(list);
-	spin_unlock_irq(list_lock);
+	spin_lock_irq(&ib_nodev_conns_lock);
+	list_splice(&ib_nodev_conns, &tmp_list);
+	spin_unlock_irq(&ib_nodev_conns_lock);
 
 	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
 		rds_conn_destroy(ic->conn);
@@ -200,12 +220,12 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 	if (!pool)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&pool->free_list);
-	INIT_LIST_HEAD(&pool->drop_list);
-	INIT_LIST_HEAD(&pool->clean_list);
+	INIT_XLIST_HEAD(&pool->free_list);
+	INIT_XLIST_HEAD(&pool->drop_list);
+	INIT_XLIST_HEAD(&pool->clean_list);
 	mutex_init(&pool->flush_lock);
-	spin_lock_init(&pool->list_lock);
-	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+	init_waitqueue_head(&pool->flush_wait);
+	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
 	pool->fmr_attr.max_pages = fmr_message_size;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -233,34 +253,60 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
 {
-	flush_workqueue(rds_wq);
-	rds_ib_flush_mr_pool(pool, 1);
+	cancel_delayed_work_sync(&pool->flush_worker);
+	rds_ib_flush_mr_pool(pool, 1, NULL);
 	WARN_ON(atomic_read(&pool->item_count));
 	WARN_ON(atomic_read(&pool->free_pinned));
 	kfree(pool);
 }
 
+static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
+			 struct rds_ib_mr **ibmr_ret)
+{
+	struct xlist_head *ibmr_xl;
+	ibmr_xl = xlist_del_head_fast(xl);
+	*ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
+}
+
 static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
 {
 	struct rds_ib_mr *ibmr = NULL;
-	unsigned long flags;
+	struct xlist_head *ret;
+	unsigned long *flag;
 
-	spin_lock_irqsave(&pool->list_lock, flags);
-	if (!list_empty(&pool->clean_list)) {
-		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
-		list_del_init(&ibmr->list);
-	}
-	spin_unlock_irqrestore(&pool->list_lock, flags);
+	preempt_disable();
+	flag = &__get_cpu_var(clean_list_grace);
+	set_bit(CLEAN_LIST_BUSY_BIT, flag);
+	ret = xlist_del_head(&pool->clean_list);
+	if (ret)
+		ibmr = list_entry(ret, struct rds_ib_mr, xlist);
 
+	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
+	preempt_enable();
 	return ibmr;
 }
 
+static inline void wait_clean_list_grace(void)
+{
+	int cpu;
+	unsigned long *flag;
+
+	for_each_online_cpu(cpu) {
+		flag = &per_cpu(clean_list_grace, cpu);
+		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+			cpu_relax();
+	}
+}
+
 static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 {
 	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
 	struct rds_ib_mr *ibmr = NULL;
 	int err = 0, iter = 0;
 
+	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+		schedule_delayed_work(&pool->flush_worker, 10);
+
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
 		if (ibmr)
@@ -287,19 +333,24 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
 
 		/* We do have some empty MRs. Flush them out. */
 		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
-		rds_ib_flush_mr_pool(pool, 0);
+		rds_ib_flush_mr_pool(pool, 0, &ibmr);
+		if (ibmr)
+			return ibmr;
 	}
 
-	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
 	if (!ibmr) {
 		err = -ENOMEM;
 		goto out_no_cigar;
 	}
 
+	memset(ibmr, 0, sizeof(*ibmr));
+
 	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
 			(IB_ACCESS_LOCAL_WRITE |
 			 IB_ACCESS_REMOTE_READ |
-			 IB_ACCESS_REMOTE_WRITE),
+			 IB_ACCESS_REMOTE_WRITE|
+			 IB_ACCESS_REMOTE_ATOMIC),
 			&pool->fmr_attr);
 	if (IS_ERR(ibmr->fmr)) {
 		err = PTR_ERR(ibmr->fmr);
@@ -367,7 +418,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
 	if (page_cnt > fmr_message_size)
 		return -EINVAL;
 
-	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
+	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
+				 rdsibdev_to_node(rds_ibdev));
 	if (!dma_pages)
 		return -ENOMEM;
 
@@ -441,7 +493,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
 
 			/* FIXME we need a way to tell a r/w MR
 			 * from a r/o MR */
-			BUG_ON(in_interrupt());
+			BUG_ON(irqs_disabled());
 			set_page_dirty(page);
 			put_page(page);
 		}
@@ -477,33 +529,109 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
 }
 
 /*
+ * given an xlist of mrs, put them all into the list_head for more processing
+ */
+static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+{
+	struct rds_ib_mr *ibmr;
+	struct xlist_head splice;
+	struct xlist_head *cur;
+	struct xlist_head *next;
+
+	splice.next = NULL;
+	xlist_splice(xlist, &splice);
+	cur = splice.next;
+	while (cur) {
+		next = cur->next;
+		ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+		list_add_tail(&ibmr->unmap_list, list);
+		cur = next;
+	}
+}
+
+/*
+ * this takes a list head of mrs and turns it into an xlist of clusters.
+ * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
+ * reuse.
+ */
+static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
+				struct list_head *list, struct xlist_head *xlist,
+				struct xlist_head **tail_ret)
+{
+	struct rds_ib_mr *ibmr;
+	struct xlist_head *cur_mr = xlist;
+	struct xlist_head *tail_mr = NULL;
+
+	list_for_each_entry(ibmr, list, unmap_list) {
+		tail_mr = &ibmr->xlist;
+		tail_mr->next = NULL;
+		cur_mr->next = tail_mr;
+		cur_mr = tail_mr;
+	}
+	*tail_ret = tail_mr;
+}
+
+/*
  * Flush our pool of MRs.
  * At a minimum, all currently unused MRs are unmapped.
  * If the number of MRs allocated exceeds the limit, we also try
  * to free as many MRs as needed to get back to this limit.
  */
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
+				int free_all, struct rds_ib_mr **ibmr_ret)
 {
 	struct rds_ib_mr *ibmr, *next;
+	struct xlist_head clean_xlist;
+	struct xlist_head *clean_tail;
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 	unsigned long unpinned = 0;
-	unsigned long flags;
 	unsigned int nfreed = 0, ncleaned = 0, free_goal;
 	int ret = 0;
 
 	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
 
-	mutex_lock(&pool->flush_lock);
+	if (ibmr_ret) {
+		DEFINE_WAIT(wait);
+		while(!mutex_trylock(&pool->flush_lock)) {
+			ibmr = rds_ib_reuse_fmr(pool);
+			if (ibmr) {
+				*ibmr_ret = ibmr;
+				finish_wait(&pool->flush_wait, &wait);
+				goto out_nolock;
+			}
+
+			prepare_to_wait(&pool->flush_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (xlist_empty(&pool->clean_list))
+				schedule();
+
+			ibmr = rds_ib_reuse_fmr(pool);
+			if (ibmr) {
+				*ibmr_ret = ibmr;
+				finish_wait(&pool->flush_wait, &wait);
+				goto out_nolock;
+			}
+		}
+		finish_wait(&pool->flush_wait, &wait);
+	} else
+		mutex_lock(&pool->flush_lock);
+
+	if (ibmr_ret) {
+		ibmr = rds_ib_reuse_fmr(pool);
+		if (ibmr) {
+			*ibmr_ret = ibmr;
+			goto out;
+		}
+	}
 
-	spin_lock_irqsave(&pool->list_lock, flags);
 	/* Get the list of all MRs to be dropped. Ordering matters -
-	 * we want to put drop_list ahead of free_list. */
-	list_splice_init(&pool->free_list, &unmap_list);
-	list_splice_init(&pool->drop_list, &unmap_list);
+	 * we want to put drop_list ahead of free_list.
+	 */
+	xlist_append_to_list(&pool->drop_list, &unmap_list);
+	xlist_append_to_list(&pool->free_list, &unmap_list);
 	if (free_all)
-		list_splice_init(&pool->clean_list, &unmap_list);
-	spin_unlock_irqrestore(&pool->list_lock, flags);
+		xlist_append_to_list(&pool->clean_list, &unmap_list);
 
 	free_goal = rds_ib_flush_goal(pool, free_all);
 
@@ -511,19 +639,20 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
 		goto out;
 
 	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
-	list_for_each_entry(ibmr, &unmap_list, list)
+	list_for_each_entry(ibmr, &unmap_list, unmap_list)
 		list_add(&ibmr->fmr->list, &fmr_list);
+
 	ret = ib_unmap_fmr(&fmr_list);
 	if (ret)
 		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
 
 	/* Now we can destroy the DMA mapping and unpin any pages */
-	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
+	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
 		unpinned += ibmr->sg_len;
 		__rds_ib_teardown_mr(ibmr);
 		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
 			rds_ib_stats_inc(s_ib_rdma_mr_free);
-			list_del(&ibmr->list);
+			list_del(&ibmr->unmap_list);
 			ib_dealloc_fmr(ibmr->fmr);
 			kfree(ibmr);
 			nfreed++;
@@ -531,9 +660,27 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
 		ncleaned++;
 	}
 
-	spin_lock_irqsave(&pool->list_lock, flags);
-	list_splice(&unmap_list, &pool->clean_list);
-	spin_unlock_irqrestore(&pool->list_lock, flags);
+	if (!list_empty(&unmap_list)) {
+		/* we have to make sure that none of the things we're about
+		 * to put on the clean list would race with other cpus trying
+		 * to pull items off. The xlist would explode if we managed to
+		 * remove something from the clean list and then add it back again
+		 * while another CPU was spinning on that same item in xlist_del_head.
+		 *
+		 * This is pretty unlikely, but just in case wait for an xlist grace period
+		 * here before adding anything back into the clean list.
+		 */
+		wait_clean_list_grace();
+
+		list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+		if (ibmr_ret)
+			refill_local(pool, &clean_xlist, ibmr_ret);
+
+		/* refill_local may have emptied our list */
+		if (!xlist_empty(&clean_xlist))
+			xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+
+	}
 
 	atomic_sub(unpinned, &pool->free_pinned);
 	atomic_sub(ncleaned, &pool->dirty_count);
@@ -541,14 +688,17 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
 
 out:
 	mutex_unlock(&pool->flush_lock);
+	if (waitqueue_active(&pool->flush_wait))
+		wake_up(&pool->flush_wait);
+out_nolock:
 	return ret;
 }
 
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
-	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
+	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
 
-	rds_ib_flush_mr_pool(pool, 0);
+	rds_ib_flush_mr_pool(pool, 0, NULL);
 }
 
 void rds_ib_free_mr(void *trans_private, int invalidate)
@@ -556,47 +706,48 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	struct rds_ib_mr *ibmr = trans_private;
 	struct rds_ib_device *rds_ibdev = ibmr->device;
 	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
-	unsigned long flags;
 
 	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
 
 	/* Return it to the pool's free list */
-	spin_lock_irqsave(&pool->list_lock, flags);
 	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
-		list_add(&ibmr->list, &pool->drop_list);
+		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
 	else
-		list_add(&ibmr->list, &pool->free_list);
+		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
 
 	atomic_add(ibmr->sg_len, &pool->free_pinned);
 	atomic_inc(&pool->dirty_count);
-	spin_unlock_irqrestore(&pool->list_lock, flags);
 
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_work(rds_wq, &pool->flush_worker);
+		schedule_delayed_work(&pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
-			rds_ib_flush_mr_pool(pool, 0);
+			rds_ib_flush_mr_pool(pool, 0, NULL);
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_work(rds_wq, &pool->flush_worker);
+			schedule_delayed_work(&pool->flush_worker, 10);
 		}
 	}
+
+	rds_ib_dev_put(rds_ibdev);
 }
 
 void rds_ib_flush_mrs(void)
 {
 	struct rds_ib_device *rds_ibdev;
 
+	down_read(&rds_ib_devices_lock);
 	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
 		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
 
 		if (pool)
-			rds_ib_flush_mr_pool(pool, 0);
+			rds_ib_flush_mr_pool(pool, 0, NULL);
 	}
+	up_read(&rds_ib_devices_lock);
 }
 
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -628,6 +779,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
 
 	ibmr->device = rds_ibdev;
+	rds_ibdev = NULL;
 
 out:
 	if (ret) {
@@ -635,5 +787,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 			rds_ib_free_mr(ibmr, 0);
 		ibmr = ERR_PTR(ret);
 	}
+	if (rds_ibdev)
+		rds_ib_dev_put(rds_ibdev);
 	return ibmr;
 }
+