path: root/net/rds/send.c
author    Andy Grover <andy.grover@oracle.com>  2010-03-23 20:39:07 -0400
committer Andy Grover <andy.grover@oracle.com>  2010-09-08 21:12:12 -0400
commit    049ee3f500954176a87f22e6ee3e98aecb1b8958 (patch)
tree      5dfd8cf3e6d9a7a15e80f6ddee7f4ce7c4aa7a8c /net/rds/send.c
parent    f17a1a55fb672d7f64be7f2e940ef5669e5efa0a (diff)
RDS: Change send lock from a mutex to a spinlock
This change allows us to call rds_send_xmit() from a tasklet, which is
crucial to our new operating model.

* Change c_send_lock to a spinlock
* Update stats fields "sem_" to "_lock"
* Remove unneeded rds_conn_is_sending()

About locking between shutdown and send -- send checks if the connection
is up. Shutdown puts the connection into DISCONNECTING. After this, all
threads entering send will exit immediately. However, a thread could be
*in* send_xmit(), so shutdown acquires the c_send_lock to ensure everyone
is out before proceeding with connection shutdown.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
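As a rough sketch of the interlock the message describes (hypothetical and
simplified -- only c_send_lock, rds_conn_up() and the DISCONNECTING state
come from this patch; the helper and the state-change call below are
illustrative stand-ins for the real shutdown path):

/* Hypothetical sketch of the shutdown/send interlock described above. */
static void example_conn_shutdown(struct rds_connection *conn)
{
	unsigned long flags;

	/* Move to DISCONNECTING first: every thread entering the send
	 * path after this sees !rds_conn_up() and exits immediately.
	 * (Illustrative state change; the real path differs.) */
	atomic_set(&conn->c_state, RDS_CONN_DISCONNECTING);

	/* A sender may still be *in* rds_send_xmit() holding
	 * c_send_lock; acquiring it here waits until that thread has
	 * left, so teardown starts with no one on the send path. */
	spin_lock_irqsave(&conn->c_send_lock, flags);
	spin_unlock_irqrestore(&conn->c_send_lock, flags);

	/* ... proceed with connection teardown ... */
}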
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--  net/rds/send.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index 8a0647af5d95..d4feec6ad09c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -116,19 +116,18 @@ int rds_send_xmit(struct rds_connection *conn)
 	int was_empty = 0;
 	LIST_HEAD(to_be_dropped);
 
+	if (!rds_conn_up(conn))
+		goto out;
+
 	/*
 	 * sendmsg calls here after having queued its message on the send
 	 * queue. We only have one task feeding the connection at a time. If
 	 * another thread is already feeding the queue then we back off. This
 	 * avoids blocking the caller and trading per-connection data between
 	 * caches per message.
-	 *
-	 * The sem holder will issue a retry if they notice that someone queued
-	 * a message after they stopped walking the send queue but before they
-	 * dropped the sem.
 	 */
-	if (!mutex_trylock(&conn->c_send_lock)) {
-		rds_stats_inc(s_send_sem_contention);
+	if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
+		rds_stats_inc(s_send_lock_contention);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -346,7 +345,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	 * stop processing the loop when the transport hasn't taken
 	 * responsibility for forward progress.
 	 */
-	mutex_unlock(&conn->c_send_lock);
+	spin_unlock_irqrestore(&conn->c_send_lock, flags);
 
 	if (send_quota == 0 && !was_empty) {
 		/* We exhausted the send quota, but there's work left to
@@ -360,7 +359,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	 * spin lock */
 	spin_lock_irqsave(&conn->c_lock, flags);
 	if (!list_empty(&conn->c_send_queue)) {
-		rds_stats_inc(s_send_sem_queue_raced);
+		rds_stats_inc(s_send_lock_queue_raced);
 		ret = -EAGAIN;
 	}
 	spin_unlock_irqrestore(&conn->c_lock, flags);
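
Since the stated goal is to let rds_send_xmit() run from a tasklet, a
caller might look roughly like the sketch below. This is hypothetical:
the patch only makes such a caller legal, and the tasklet wiring here is
illustrative, with rds_wq and c_send_w being RDS's existing worker hooks
that a retry would go through.

#include <linux/interrupt.h>
#include "rds.h"

/* Hypothetical tasklet caller; not part of this patch. */
static void example_send_tasklet(unsigned long data)
{
	struct rds_connection *conn = (struct rds_connection *)data;

	/* Safe in atomic context now that c_send_lock is a spinlock
	 * taken with spin_trylock_irqsave() rather than a mutex. */
	if (rds_send_xmit(conn) == -EAGAIN)
		/* We raced with a newly queued message or exhausted the
		 * send quota: hand the retry to the connection worker. */
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
}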