author     Olaf Kirch <olaf.kirch@oracle.com>        2008-01-16 12:37:16 -0500
committer  Roland Dreier <rolandd@cisco.com>         2008-01-25 17:15:43 -0500
commit     a3cd7d9070be417a21905c997ee32d756d999b38 (patch)
tree       1576c829374700bb774ed6e560aa9be318240526 /drivers/infiniband
parent     a656eb758fc6e6a42659ecf5ba34a5c5a2aeec17 (diff)
IB/fmr_pool: ib_fmr_pool_flush() should flush all dirty FMRs
When an FMR is released via ib_fmr_pool_unmap(), the FMR usually ends
up on the free_list rather than the dirty_list (because we allow a
certain number of remappings before actually requiring a flush).
However, ib_fmr_batch_release() only looks at dirty_list when flushing
out old mappings. This means that when ib_fmr_pool_flush() is used to
force a flush of the FMR pool, some dirty FMRs that have not reached
their maximum remap count will not actually be flushed.
Fix this by flushing all FMRs that have been used at least once in
ib_fmr_batch_release().
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
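
[Editor's note: for context, the consumer-facing pattern this fix matters for
looks roughly like the sketch below. This is a minimal, hypothetical ULP
fragment and is not part of the patch: example_use_and_flush(), page_list,
npages and iova are illustrative names, and the prototypes are taken from the
FMR pool API of this era (the exported flush entry point being
ib_flush_fmr_pool(), which the commit text refers to as ib_fmr_pool_flush()).
The point is that a caller which unmaps FMRs and then forces a flush expects
every previously used mapping to be invalidated, including those parked on
the free_list.]

    #include <linux/err.h>
    #include <rdma/ib_fmr_pool.h>

    /* Hypothetical ULP helper, a sketch only -- not kernel code from this patch. */
    static int example_use_and_flush(struct ib_fmr_pool *pool,
                                     u64 *page_list, int npages, u64 iova)
    {
            struct ib_pool_fmr *fmr;

            fmr = ib_fmr_pool_map_phys(pool, page_list, npages, iova);
            if (IS_ERR(fmr))
                    return PTR_ERR(fmr);

            /* ... perform I/O through the mapping ... */

            /*
             * Below max_remap, the unmapped FMR usually goes back on the
             * free_list while its old mapping is still live.
             */
            ib_fmr_pool_unmap(fmr);

            /*
             * Force invalidation of all previously used FMRs; with this
             * patch, used entries sitting on the free_list are flushed too.
             */
            return ib_flush_fmr_pool(pool);
    }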
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/core/fmr_pool.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 4a476a870214..6c7aa59794d4 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
 	int ret;
-	struct ib_pool_fmr *fmr;
+	struct ib_pool_fmr *fmr, *next;
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 
@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
 	}
 
+	/*
+	 * The free_list may hold FMRs that have been put there
+	 * because they haven't reached the max_remap count.
+	 * Invalidate their mapping as well.
+	 */
+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+		if (fmr->remap_count == 0)
+			continue;
+		hlist_del_init(&fmr->cache_node);
+		fmr->remap_count = 0;
+		list_add_tail(&fmr->fmr->list, &fmr_list);
+		list_move(&fmr->list, &unmap_list);
+	}
+
 	list_splice(&pool->dirty_list, &unmap_list);
 	INIT_LIST_HEAD(&pool->dirty_list);
 	pool->dirty_len = 0;
@@ -367,11 +381,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 
 	i = 0;
 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
-		if (fmr->remap_count) {
-			INIT_LIST_HEAD(&fmr_list);
-			list_add_tail(&fmr->fmr->list, &fmr_list);
-			ib_unmap_fmr(&fmr_list);
-		}
 		ib_dealloc_fmr(fmr->fmr);
 		list_del(&fmr->list);
 		kfree(fmr);