aboutsummaryrefslogtreecommitdiffstats
path: root/net/rds/connection.c
diff options
context:
space:
mode:
authorZach Brown <zach.brown@oracle.com>2010-06-04 17:41:41 -0400
committerAndy Grover <andy.grover@oracle.com>2010-09-08 21:15:27 -0400
commit0f4b1c7e89e699f588807a914ec6e6396c851a72 (patch)
tree8b882f85f03089283f6d222bf8c7d5616a102ecd /net/rds/connection.c
parent501dcccdb7a2335cde07d4acb56e636182d62944 (diff)
rds: fix rds_send_xmit() serialization
rds_send_xmit() was changed to hold an interrupt masking spinlock instead of a mutex so that it could be called from the IB receive tasklet path. This broke the TCP transport because its xmit method can block and masks and unmasks interrupts.

This patch serializes callers to rds_send_xmit() with a simple bit instead of the current spinlock or previous mutex. This enables rds_send_xmit() to be called from any context and to call functions which block. Getting rid of the c_send_lock exposes the bare c_lock acquisitions which are changed to block interrupts.

A waitqueue is added so that rds_conn_shutdown() can wait for callers to leave rds_send_xmit() before tearing down partial send state. This lets us get rid of c_senders.

rds_send_xmit() is changed to check the conn state after acquiring the RDS_IN_XMIT bit to resolve races with the shutdown path. Previously both worked with the conn state and then the lock in the same order, allowing them to race and execute the paths concurrently.

rds_send_reset() isn't racing with rds_send_xmit() now that rds_conn_shutdown() properly ensures that rds_send_xmit() can't start once the conn state has been changed. We can remove its previous use of the spinlock.

Finally, c_send_generation is redundant. Callers can race to test the c_flags bit by simply retrying instead of racing to test the c_send_generation atomic.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
Diffstat (limited to 'net/rds/connection.c')
-rw-r--r--net/rds/connection.c19
1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 5bb0eec5ada3..89871db77f8f 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -148,9 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
148 spin_lock_init(&conn->c_lock); 148 spin_lock_init(&conn->c_lock);
149 conn->c_next_tx_seq = 1; 149 conn->c_next_tx_seq = 1;
150 150
151 spin_lock_init(&conn->c_send_lock); 151 init_waitqueue_head(&conn->c_waitq);
152 atomic_set(&conn->c_send_generation, 1);
153 atomic_set(&conn->c_senders, 0);
154 INIT_LIST_HEAD(&conn->c_send_queue); 152 INIT_LIST_HEAD(&conn->c_send_queue);
155 INIT_LIST_HEAD(&conn->c_retrans); 153 INIT_LIST_HEAD(&conn->c_retrans);
156 154
@@ -275,15 +273,8 @@ void rds_conn_shutdown(struct rds_connection *conn)
275 } 273 }
276 mutex_unlock(&conn->c_cm_lock); 274 mutex_unlock(&conn->c_cm_lock);
277 275
278 /* verify everybody's out of rds_send_xmit() */ 276 wait_event(conn->c_waitq,
279 spin_lock_irq(&conn->c_send_lock); 277 !test_bit(RDS_IN_XMIT, &conn->c_flags));
280 spin_unlock_irq(&conn->c_send_lock);
281
282 while(atomic_read(&conn->c_senders)) {
283 schedule_timeout(1);
284 spin_lock_irq(&conn->c_send_lock);
285 spin_unlock_irq(&conn->c_send_lock);
286 }
287 278
288 conn->c_trans->conn_shutdown(conn); 279 conn->c_trans->conn_shutdown(conn);
289 rds_conn_reset(conn); 280 rds_conn_reset(conn);
@@ -477,8 +468,8 @@ static int rds_conn_info_visitor(struct rds_connection *conn,
477 sizeof(cinfo->transport)); 468 sizeof(cinfo->transport));
478 cinfo->flags = 0; 469 cinfo->flags = 0;
479 470
480 rds_conn_info_set(cinfo->flags, 471 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
481 spin_is_locked(&conn->c_send_lock), SENDING); 472 SENDING);
482 /* XXX Future: return the state rather than these funky bits */ 473 /* XXX Future: return the state rather than these funky bits */
483 rds_conn_info_set(cinfo->flags, 474 rds_conn_info_set(cinfo->flags,
484 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING, 475 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,