-rw-r--r--   net/rds/ib_send.c   11
-rw-r--r--   net/rds/send.c      11
2 files changed, 14 insertions, 8 deletions
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 657037d96cbf..82459e52c771 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -268,11 +268,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		if (send->s_queued + HZ/2 < jiffies)
 			rds_ib_stats_inc(s_ib_tx_stalled);
 
-		if (&send->s_op == &rm->m_final_op) {
-			/* If anyone waited for this message to get flushed out, wake
-			 * them up now */
-			rds_message_unmapped(rm);
-
+		if (send->s_op) {
+			if (send->s_op == rm->m_final_op) {
+				/* If anyone waited for this message to get flushed out, wake
+				 * them up now */
+				rds_message_unmapped(rm);
+			}
 			rds_message_put(rm);
 			send->s_op = NULL;
 		}
diff --git a/net/rds/send.c b/net/rds/send.c
index d35c43ff792e..5c6d4a0be0d7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -68,7 +68,6 @@ void rds_send_reset(struct rds_connection *conn)
 		 * transport. This isn't entirely true (it's flushed out
 		 * independently) but as the connection is down, there's
 		 * no ongoing RDMA to/from that memory */
-		printk(KERN_CRIT "send reset unmapping %p\n", rm);
 		rds_message_unmapped(rm);
 		spin_unlock_irqrestore(&conn->c_send_lock, flags);
 
@@ -234,10 +233,13 @@ restart:
 
 		/* The transport either sends the whole rdma or none of it */
 		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+			rds_message_addref(rm);
 			rm->m_final_op = &rm->rdma;
 			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
-			if (ret)
+			if (ret) {
+				rds_message_put(rm);
 				break;
+			}
 			conn->c_xmit_rdma_sent = 1;
 
 			/* The transport owns the mapped memory for now.
@@ -246,10 +248,13 @@ restart:
 		}
 
 		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+			rds_message_addref(rm);
 			rm->m_final_op = &rm->atomic;
 			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
-			if (ret)
+			if (ret) {
+				rds_message_put(rm);
 				break;
+			}
 			conn->c_xmit_atomic_sent = 1;
 
 			/* The transport owns the mapped memory for now.
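
Editor's note: the common thread of these hunks is reference ownership. rds_send_xmit() now takes an extra reference on the message (rds_message_addref) before handing an rdma or atomic op to the transport, drops it immediately if the transport rejects the op, and otherwise leaves it for the completion handler in ib_send.c, which drops one reference per completed op and calls rds_message_unmapped() only for the op recorded in m_final_op. The standalone sketch below illustrates that get/put pattern in userspace C; struct msg, msg_get, msg_put, xmit and send_op are hypothetical names for illustration, not RDS code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted message, standing in for struct rds_message. */
struct msg {
	int refcount;
};

static void msg_get(struct msg *m)
{
	m->refcount++;
}

static void msg_put(struct msg *m)
{
	if (--m->refcount == 0) {
		printf("last reference dropped, freeing message\n");
		free(m);
	}
}

/* Stand-in for a transport xmit hook: nonzero return means failure. */
static int xmit(struct msg *m, int fail)
{
	(void)m;
	return fail;
}

/* Mirrors the send-side hunks: take a reference before posting the op,
 * drop it on the error path; on success the reference is owned by the
 * in-flight op and is dropped later by the completion path. */
static int send_op(struct msg *m, int fail)
{
	msg_get(m);		/* reference owned by the in-flight op */
	if (xmit(m, fail)) {
		msg_put(m);	/* transport never saw the op */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct msg *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->refcount = 1;	/* reference held by the send queue */

	if (send_op(m, 0) == 0)
		msg_put(m);	/* completion handler's put for the final op */

	msg_put(m);		/* send queue releases its own reference */
	return 0;
}

The point of the pattern is that the error path and the completion path are mutually exclusive, so exactly one of them drops the op's reference and the message cannot be freed while the transport still owns its mapped memory.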