diff options
author | Andy Grover <andy.grover@oracle.com> | 2010-03-01 17:03:09 -0500 |
---|---|---|
committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:12:08 -0400 |
commit | ff3d7d36134ef7138803734fdbf91cc986ea7976 (patch) | |
tree | 77ef55e071f84f1b97550dbc6d6755cb3089339b /net/rds/send.c | |
parent | aa0a4ef4ac3a3c5ffa35e32520bfbc0922ef3630 (diff) |
RDS: Perform unmapping ops in stages
Previously, RDS would wait until the final send WR had completed
and then handle cleanup. With silent ops, we do not know
if an atomic, rdma, or data op will be last. This patch
handles any of these cases by keeping a pointer to the last
op in the message in m_final_op.
When the TX completion event fires, rds dispatches to per-op-type
cleanup functions, and then does whole-message cleanup, if the
last op equaled m_final_op.
This patch also moves towards having op-specific functions take
the op struct, instead of the overall rm struct.
rds_ib_connection has a pointer to keep track of a partially-
completed data send operation. This patch changes it from an
rds_message pointer to the narrower rm_data_op pointer, and
modifies places that use this pointer as needed.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/send.c')
-rw-r--r-- | net/rds/send.c | 6 |
1 files changed, 5 insertions, 1 deletions
diff --git a/net/rds/send.c b/net/rds/send.c index 69ab1040d02d..d1f364e44e36 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -252,6 +252,7 @@ int rds_send_xmit(struct rds_connection *conn) | |||
252 | 252 | ||
253 | /* The transport either sends the whole rdma or none of it */ | 253 | /* The transport either sends the whole rdma or none of it */ |
254 | if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { | 254 | if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { |
255 | rm->m_final_op = &rm->rdma; | ||
255 | ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); | 256 | ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); |
256 | if (ret) | 257 | if (ret) |
257 | break; | 258 | break; |
@@ -263,10 +264,12 @@ int rds_send_xmit(struct rds_connection *conn) | |||
263 | } | 264 | } |
264 | 265 | ||
265 | if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { | 266 | if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { |
266 | ret = conn->c_trans->xmit_atomic(conn, rm); | 267 | rm->m_final_op = &rm->atomic; |
268 | ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); | ||
267 | if (ret) | 269 | if (ret) |
268 | break; | 270 | break; |
269 | conn->c_xmit_atomic_sent = 1; | 271 | conn->c_xmit_atomic_sent = 1; |
272 | |||
270 | /* The transport owns the mapped memory for now. | 273 | /* The transport owns the mapped memory for now. |
271 | * You can't unmap it while it's on the send queue */ | 274 | * You can't unmap it while it's on the send queue */ |
272 | set_bit(RDS_MSG_MAPPED, &rm->m_flags); | 275 | set_bit(RDS_MSG_MAPPED, &rm->m_flags); |
@@ -295,6 +298,7 @@ int rds_send_xmit(struct rds_connection *conn) | |||
295 | } | 298 | } |
296 | 299 | ||
297 | if (rm->data.op_active && !conn->c_xmit_data_sent) { | 300 | if (rm->data.op_active && !conn->c_xmit_data_sent) { |
301 | rm->m_final_op = &rm->data; | ||
298 | ret = conn->c_trans->xmit(conn, rm, | 302 | ret = conn->c_trans->xmit(conn, rm, |
299 | conn->c_xmit_hdr_off, | 303 | conn->c_xmit_hdr_off, |
300 | conn->c_xmit_sg, | 304 | conn->c_xmit_sg, |