diff options
author | Andy Grover <andy.grover@oracle.com> | 2010-03-23 20:48:04 -0400 |
---|---|---|
committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:12:13 -0400 |
commit | 2ad8099b58f274dc23bc866ca259d7e5db87fa1a (patch) | |
tree | e659d7178d1e2729fc306d3b5dbb4e5e5f47544a /net/rds/send.c | |
parent | 049ee3f500954176a87f22e6ee3e98aecb1b8958 (diff) |
RDS: rds_send_xmit() locking/irq fixes
rds_message_put() cannot be called with irqs off, so move it after
irqs are re-enabled.
Spinlocks throughout the function do not need to use _irqsave because
the lock of c_send_lock at the top already disabled irqs.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/send.c')
-rw-r--r-- | net/rds/send.c | 21 |
1 file changed, 12 insertions, 9 deletions
diff --git a/net/rds/send.c b/net/rds/send.c index d4feec6ad09c..624a3dc7f060 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -168,7 +168,7 @@ int rds_send_xmit(struct rds_connection *conn) | |||
168 | if (!rm) { | 168 | if (!rm) { |
169 | unsigned int len; | 169 | unsigned int len; |
170 | 170 | ||
171 | spin_lock_irqsave(&conn->c_lock, flags); | 171 | spin_lock(&conn->c_lock); |
172 | 172 | ||
173 | if (!list_empty(&conn->c_send_queue)) { | 173 | if (!list_empty(&conn->c_send_queue)) { |
174 | rm = list_entry(conn->c_send_queue.next, | 174 | rm = list_entry(conn->c_send_queue.next, |
@@ -183,7 +183,7 @@ int rds_send_xmit(struct rds_connection *conn) | |||
183 | list_move_tail(&rm->m_conn_item, &conn->c_retrans); | 183 | list_move_tail(&rm->m_conn_item, &conn->c_retrans); |
184 | } | 184 | } |
185 | 185 | ||
186 | spin_unlock_irqrestore(&conn->c_lock, flags); | 186 | spin_unlock(&conn->c_lock); |
187 | 187 | ||
188 | if (!rm) { | 188 | if (!rm) { |
189 | was_empty = 1; | 189 | was_empty = 1; |
@@ -199,11 +199,10 @@ int rds_send_xmit(struct rds_connection *conn) | |||
199 | */ | 199 | */ |
200 | if (rm->rdma.op_active && | 200 | if (rm->rdma.op_active && |
201 | test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { | 201 | test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { |
202 | spin_lock_irqsave(&conn->c_lock, flags); | 202 | spin_lock(&conn->c_lock); |
203 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) | 203 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) |
204 | list_move(&rm->m_conn_item, &to_be_dropped); | 204 | list_move(&rm->m_conn_item, &to_be_dropped); |
205 | spin_unlock_irqrestore(&conn->c_lock, flags); | 205 | spin_unlock(&conn->c_lock); |
206 | rds_message_put(rm); | ||
207 | continue; | 206 | continue; |
208 | } | 207 | } |
209 | 208 | ||
@@ -326,10 +325,6 @@ int rds_send_xmit(struct rds_connection *conn) | |||
326 | } | 325 | } |
327 | } | 326 | } |
328 | 327 | ||
329 | /* Nuke any messages we decided not to retransmit. */ | ||
330 | if (!list_empty(&to_be_dropped)) | ||
331 | rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED); | ||
332 | |||
333 | if (conn->c_trans->xmit_complete) | 328 | if (conn->c_trans->xmit_complete) |
334 | conn->c_trans->xmit_complete(conn); | 329 | conn->c_trans->xmit_complete(conn); |
335 | 330 | ||
@@ -347,6 +342,14 @@ int rds_send_xmit(struct rds_connection *conn) | |||
347 | */ | 342 | */ |
348 | spin_unlock_irqrestore(&conn->c_send_lock, flags); | 343 | spin_unlock_irqrestore(&conn->c_send_lock, flags); |
349 | 344 | ||
345 | /* Nuke any messages we decided not to retransmit. */ | ||
346 | if (!list_empty(&to_be_dropped)) { | ||
347 | /* irqs on here, so we can put(), unlike above */ | ||
348 | list_for_each_entry(rm, &to_be_dropped, m_conn_item) | ||
349 | rds_message_put(rm); | ||
350 | rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED); | ||
351 | } | ||
352 | |||
350 | if (send_quota == 0 && !was_empty) { | 353 | if (send_quota == 0 && !was_empty) { |
351 | /* We exhausted the send quota, but there's work left to | 354 | /* We exhausted the send quota, but there's work left to |
352 | * do. Return and (re-)schedule the send worker. | 355 | * do. Return and (re-)schedule the send worker. |