Diffstat (limited to 'net/rds/send.c')
-rw-r--r--	net/rds/send.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index 8a0647af5d95..d4feec6ad09c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -116,19 +116,18 @@ int rds_send_xmit(struct rds_connection *conn)
 	int was_empty = 0;
 	LIST_HEAD(to_be_dropped);
 
+	if (!rds_conn_up(conn))
+		goto out;
+
 	/*
 	 * sendmsg calls here after having queued its message on the send
 	 * queue. We only have one task feeding the connection at a time. If
 	 * another thread is already feeding the queue then we back off. This
 	 * avoids blocking the caller and trading per-connection data between
 	 * caches per message.
-	 *
-	 * The sem holder will issue a retry if they notice that someone queued
-	 * a message after they stopped walking the send queue but before they
-	 * dropped the sem.
 	 */
-	if (!mutex_trylock(&conn->c_send_lock)) {
-		rds_stats_inc(s_send_sem_contention);
+	if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
+		rds_stats_inc(s_send_lock_contention);
 		ret = -ENOMEM;
 		goto out;
 	}
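
The first hunk bails out early when the connection is not up, drops the old semaphore-era comment, and converts the single-feeder guard from mutex_trylock() to spin_trylock_irqsave(), renaming the contention counter to match. As a rough userspace illustration of that back-off pattern (one thread feeds the queue, every other caller returns immediately instead of blocking), here is a minimal sketch using POSIX threads; the spinlock, try_feed_queue() and the contention counter are stand-ins invented for the example, not RDS code.

/* Userspace analogue of the trylock back-off in rds_send_xmit():
 * one thread walks the send queue at a time, other callers give up
 * immediately and bump a contention counter.  Illustrative names only. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t send_lock;
static unsigned long lock_contention;	/* plays the role of s_send_lock_contention */

static int try_feed_queue(void)
{
	if (pthread_spin_trylock(&send_lock) != 0) {
		/* Someone else is already feeding the connection: back off. */
		__sync_fetch_and_add(&lock_contention, 1);
		return -ENOMEM;		/* same error the kernel path reports */
	}

	/* ... walk the send queue and hand messages to the transport ... */

	pthread_spin_unlock(&send_lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&send_lock, PTHREAD_PROCESS_PRIVATE);
	printf("first call: %d, contention so far: %lu\n",
	       try_feed_queue(), lock_contention);
	pthread_spin_destroy(&send_lock);
	return 0;
}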
@@ -346,7 +345,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	 * stop processing the loop when the transport hasn't taken
 	 * responsibility for forward progress.
 	 */
-	mutex_unlock(&conn->c_send_lock);
+	spin_unlock_irqrestore(&conn->c_send_lock, flags);
 
 	if (send_quota == 0 && !was_empty) {
 		/* We exhausted the send quota, but there's work left to
@@ -360,7 +359,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * spin lock */
 		spin_lock_irqsave(&conn->c_lock, flags);
 		if (!list_empty(&conn->c_send_queue)) {
-			rds_stats_inc(s_send_sem_queue_raced);
+			rds_stats_inc(s_send_lock_queue_raced);
 			ret = -EAGAIN;
 		}
 		spin_unlock_irqrestore(&conn->c_lock, flags);
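
The last two hunks release the new spinlock where the mutex used to be unlocked and rename the race counter. The re-check under conn->c_lock exists because a message can be queued after the sender stops walking the send queue but before it drops c_send_lock; returning -EAGAIN makes the caller reschedule the transmit. A hedged userspace sketch of that re-check follows; struct fake_conn, recheck_send_queue() and the field names are invented for illustration.

/* Sketch of the post-unlock re-check: after giving up the send lock with
 * nothing sent, look at the queue again under a second lock and ask for a
 * retry if work arrived in the meantime.  Names are illustrative only. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_conn {
	pthread_mutex_t lock;		/* stands in for conn->c_lock */
	int send_queue_len;		/* non-empty send queue => work left */
	unsigned long queue_raced;	/* mirrors s_send_lock_queue_raced */
};

/* Called after the send lock has been dropped and nothing was sent. */
static int recheck_send_queue(struct fake_conn *conn)
{
	int ret = 0;

	pthread_mutex_lock(&conn->lock);
	if (conn->send_queue_len != 0) {
		/* A message was queued while we weren't looking: retry. */
		conn->queue_raced++;
		ret = -EAGAIN;
	}
	pthread_mutex_unlock(&conn->lock);

	return ret;
}

int main(void)
{
	struct fake_conn conn = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.send_queue_len = 1,	/* pretend a message raced in */
	};

	printf("recheck: %d, raced %lu time(s)\n",
	       recheck_send_queue(&conn), conn.queue_raced);
	return 0;
}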