author     Olaf Kirch <olaf.kirch@oracle.com>        2008-01-16 12:36:27 -0500
committer  Roland Dreier <rolandd@cisco.com>         2008-01-25 17:15:42 -0500
commit     a656eb758fc6e6a42659ecf5ba34a5c5a2aeec17 (patch)
tree       779bc9dea9396aa1907528472108a90ef3f2bce8 /drivers/infiniband
parent     2fe7e6f7c9f55eac24c5b3cdf56af29ab9b0ca81 (diff)
IB/fmr_pool: Flush serial numbers can get out of sync
Normally, the serial numbers for flush requests and flushes executed for an FMR pool should be in sync.

However, if the FMR pool flushes dirty FMRs because the dirty_watermark was reached, we wake up the cleanup thread and let it do its stuff. As a side effect, the cleanup thread increments pool->flush_ser, which leaves it one higher than pool->req_ser. The next time the user calls ib_flush_fmr_pool(), the cleanup thread will be woken up, but ib_flush_fmr_pool() won't wait for the flush to complete because flush_ser is already past req_ser. This means the FMRs that the user expects to be flushed may not have all been flushed when the function returns.

Fix this by telling the cleanup thread to do work exclusively by incrementing req_ser, and by moving the comparison of dirty_len and dirty_watermark into ib_fmr_pool_unmap().

Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/core/fmr_pool.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index e8d5f6b64998..4a476a870214 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -182,8 +182,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
 	struct ib_fmr_pool *pool = pool_ptr;
 
 	do {
-		if (pool->dirty_len >= pool->dirty_watermark ||
-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
 			ib_fmr_batch_release(pool);
 
 			atomic_inc(&pool->flush_ser);
@@ -194,8 +193,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (pool->dirty_len < pool->dirty_watermark &&
-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
 		    !kthread_should_stop())
 			schedule();
 		__set_current_state(TASK_RUNNING);
@@ -511,8 +509,10 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 			list_add_tail(&fmr->list, &pool->free_list);
 		} else {
 			list_add_tail(&fmr->list, &pool->dirty_list);
-			++pool->dirty_len;
-			wake_up_process(pool->thread);
+			if (++pool->dirty_len >= pool->dirty_watermark) {
+				atomic_inc(&pool->req_ser);
+				wake_up_process(pool->thread);
+			}
 		}
 	}
 
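
For reference, the handshake this patch relies on can be modelled outside the kernel. The sketch below is illustrative only: pthreads and C11 atomics stand in for the pool's kthread, wait queue and wake_up_process(), and the helper names (cleaner, unmap_one, flush_pool, DIRTY_WATERMARK) are made up for the example; only the req_ser/flush_ser counters and the watermark test mirror fmr_pool.c. The cleanup thread flushes whenever flush_ser lags req_ser, unmapping bumps req_ser once the watermark is crossed, and an explicit flush bumps req_ser and then waits for flush_ser to catch up with its own request.

/* Illustrative user-space model of the fmr_pool serial-number handshake.
 * Not kernel code: pthreads and C11 atomics replace the kthread, wait
 * queue and wake_up_process() used by drivers/infiniband/core/fmr_pool.c.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DIRTY_WATERMARK 4

static atomic_int req_ser;    /* flushes requested so far */
static atomic_int flush_ser;  /* flushes completed so far */
static atomic_int dirty_len;  /* dirty entries queued */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake  = PTHREAD_COND_INITIALIZER;  /* "wake the cleaner" */
static pthread_cond_t done  = PTHREAD_COND_INITIALIZER;  /* "a flush finished" */

static void *cleaner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		/* Work only when a flush has been requested, i.e. flush_ser
		 * lags req_ser -- the same test the patched cleanup thread uses. */
		while (atomic_load(&flush_ser) - atomic_load(&req_ser) >= 0)
			pthread_cond_wait(&wake, &lock);

		atomic_store(&dirty_len, 0);     /* stands in for ib_fmr_batch_release() */
		atomic_fetch_add(&flush_ser, 1);
		pthread_cond_broadcast(&done);   /* release any flush_pool() waiters */
	}
	return NULL;
}

/* Mirror of the fixed unmap tail: request a flush (bump req_ser) only
 * once the dirty watermark has been reached. */
static void unmap_one(void)
{
	pthread_mutex_lock(&lock);
	if (atomic_fetch_add(&dirty_len, 1) + 1 >= DIRTY_WATERMARK) {
		atomic_fetch_add(&req_ser, 1);
		pthread_cond_signal(&wake);
	}
	pthread_mutex_unlock(&lock);
}

/* Mirror of an explicit pool flush: record our own request serial and
 * wait until flush_ser has caught up with it. */
static void flush_pool(void)
{
	pthread_mutex_lock(&lock);
	int serial = atomic_fetch_add(&req_ser, 1) + 1;
	pthread_cond_signal(&wake);
	while (atomic_load(&flush_ser) - serial < 0)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, cleaner, NULL);

	for (int i = 0; i < 10; i++)
		unmap_one();   /* crosses the watermark -> background flush requests */

	flush_pool();          /* still blocks until this specific request is serviced */
	printf("req_ser=%d flush_ser=%d\n",
	       atomic_load(&req_ser), atomic_load(&flush_ser));
	return 0;
}

Because every caller that wants a flush now goes through req_ser, a watermark-triggered background flush can no longer leave flush_ser ahead of a later explicit request, which is exactly the out-of-sync case the commit message describes.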