path: root/net/rds/send.c
author	Chris Mason <chris.mason@oracle.com>	2010-05-11 18:11:11 -0400
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:15:09 -0400
commit	7e3f2952eeb1a0fe2aa9882fd1705a88f9d89b35 (patch)
tree	903d89d0d1184bd1f33df2b5036615d1c40cfb06	/net/rds/send.c
parent	38a4e5e61344490f18241333d7b1b368a3a38748 (diff)
rds: don't let RDS shutdown a connection while senders are present
This is the first in a long line of patches that try to fix races between RDS connection shutdown and RDS traffic. Here we maintain a count of active senders to make sure the connection doesn't go away while they are using it.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
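The scheme is the common kernel pattern for protecting a teardown path from concurrent users: the send path bumps an atomic counter before it touches the connection and drops it when it is done, and shutdown refuses to proceed until that counter drains to zero. A minimal sketch of the pattern follows; everything beyond the atomic_inc()/atomic_dec() pairing (the struct, field and function names, the c_down flag and the wait queue) is an illustrative assumption, not the actual net/rds code.

/*
 * Illustrative sketch only: count active senders so shutdown cannot
 * tear the connection down underneath them.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>

struct example_conn {
	atomic_t		c_senders;	/* threads inside the send path */
	int			c_down;		/* set once shutdown has begun */
	wait_queue_head_t	c_waitq;	/* shutdown sleeps here */
};

static int example_send_xmit(struct example_conn *conn)
{
	atomic_inc(&conn->c_senders);		/* hold the connection while sending */

	if (conn->c_down) {			/* shutdown in progress, back off */
		atomic_dec(&conn->c_senders);
		wake_up(&conn->c_waitq);
		return -ENOTCONN;
	}

	/* ... hand messages to the transport here ... */

	atomic_dec(&conn->c_senders);		/* done with the connection */
	wake_up(&conn->c_waitq);
	return 0;
}

In the patch itself the increment is added in the send path just before the transport's xmit_prepare hook and the decrement after the send loop has finished, so the counter covers the whole window in which a sender may be transmitting on the connection.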
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--	net/rds/send.c | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index 8e3fd9981c2e..d35c43ff792e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -60,15 +60,23 @@ void rds_send_reset(struct rds_connection *conn)
 	struct rds_message *rm, *tmp;
 	unsigned long flags;
 
+	spin_lock_irqsave(&conn->c_send_lock, flags);
 	if (conn->c_xmit_rm) {
+		rm = conn->c_xmit_rm;
+		conn->c_xmit_rm = NULL;
 		/* Tell the user the RDMA op is no longer mapped by the
 		 * transport. This isn't entirely true (it's flushed out
 		 * independently) but as the connection is down, there's
 		 * no ongoing RDMA to/from that memory */
-		rds_message_unmapped(conn->c_xmit_rm);
-		rds_message_put(conn->c_xmit_rm);
-		conn->c_xmit_rm = NULL;
+printk(KERN_CRIT "send reset unmapping %p\n", rm);
+		rds_message_unmapped(rm);
+		spin_unlock_irqrestore(&conn->c_send_lock, flags);
+
+		rds_message_put(rm);
+	} else {
+		spin_unlock_irqrestore(&conn->c_send_lock, flags);
 	}
+
 	conn->c_xmit_sg = 0;
 	conn->c_xmit_hdr_off = 0;
 	conn->c_xmit_data_off = 0;
@@ -131,6 +139,7 @@ restart:
 		ret = -ENOMEM;
 		goto out;
 	}
+	atomic_inc(&conn->c_senders);
 
 	if (conn->c_trans->xmit_prepare)
 		conn->c_trans->xmit_prepare(conn);
@@ -350,6 +359,8 @@ restart:
 		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 	}
 
+	atomic_dec(&conn->c_senders);
+
 	/*
 	 * Other senders will see we have c_send_lock and exit. We
 	 * need to recheck the send queue and race again for c_send_lock
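The atomic_dec() here pairs with the atomic_inc() added in the earlier hunk, closing the active-sender window. The matching drain lives in the connection-shutdown path, which is outside this file (the diffstat is limited to net/rds/send.c); purely as an illustration, continuing the sketch above, a shutdown side would typically look like the snippet below, with the names again being assumptions rather than the actual RDS code.

/*
 * Illustrative only: shutdown marks the connection down and then
 * sleeps until every in-flight sender has dropped its reference.
 */
static void example_conn_shutdown(struct example_conn *conn)
{
	conn->c_down = 1;

	/* wait for every example_send_xmit() caller to leave the send path */
	wait_event(conn->c_waitq, atomic_read(&conn->c_senders) == 0);

	/* no sender can touch the connection now; safe to tear it down */
}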