summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorAndy Grover <andy.grover@oracle.com>2010-01-12 15:37:17 -0500
committerAndy Grover <andy.grover@oracle.com>2010-09-08 21:11:35 -0400
commit3ef13f3c22aaea28aff383cb0883481d24885456 (patch)
tree2d7277a9401c5d47c0a42d5de3e27f1df042da6b /net
parente779137aa76d38d5c33a98ed887092ae4e4f016f (diff)
RDS: cleanup/fix rds_rdma_unuse
First, the atomic_inc looks wrong: the refcount should be decremented only once here, and that is already done by the rds_mr_put() at the end. Second, simplify the logic by bailing out early (with a warning) if mr is NULL. Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net')
-rw-r--r--  net/rds/rdma.c  27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 24274bb9e329..5011efa62a92 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -414,27 +414,30 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
414 414
415 spin_lock_irqsave(&rs->rs_rdma_lock, flags); 415 spin_lock_irqsave(&rs->rs_rdma_lock, flags);
416 mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); 416 mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
417 if (mr && (mr->r_use_once || force)) { 417 if (!mr) {
418 printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
419 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
420 return;
421 }
422
423 if (mr->r_use_once || force) {
418 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys); 424 rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
419 RB_CLEAR_NODE(&mr->r_rb_node); 425 RB_CLEAR_NODE(&mr->r_rb_node);
420 zot_me = 1; 426 zot_me = 1;
421 } else if (mr) 427 }
422 atomic_inc(&mr->r_refcount);
423 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); 428 spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
424 429
425 /* May have to issue a dma_sync on this memory region. 430 /* May have to issue a dma_sync on this memory region.
426 * Note we could avoid this if the operation was a RDMA READ, 431 * Note we could avoid this if the operation was a RDMA READ,
427 * but at this point we can't tell. */ 432 * but at this point we can't tell. */
428 if (mr) { 433 if (mr->r_trans->sync_mr)
429 if (mr->r_trans->sync_mr) 434 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
430 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
431 435
432 /* If the MR was marked as invalidate, this will 436 /* If the MR was marked as invalidate, this will
433 * trigger an async flush. */ 437 * trigger an async flush. */
434 if (zot_me) 438 if (zot_me)
435 rds_destroy_mr(mr); 439 rds_destroy_mr(mr);
436 rds_mr_put(mr); 440 rds_mr_put(mr);
437 }
438} 441}
439 442
440void rds_rdma_free_op(struct rds_rdma_op *ro) 443void rds_rdma_free_op(struct rds_rdma_op *ro)