author		Andy Grover <andy.grover@oracle.com>	2010-01-12 17:33:38 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:41 -0400
commit		15133f6e67d8d646d0744336b4daa3135452cb0d (patch)
tree		e5675d5a3ab240edc9a66af6b891dd75fa9eabae /net/rds/send.c
parent		a63273d4992603979ddb181b6a8f07082839b39f (diff)
RDS: Implement atomic operations

Implement a CMSG-based interface to do FADD and CSWP ops.
Alter send routines to handle atomic ops.
Add atomic counters to stats.
Add xmit_atomic() to struct rds_transport.
Inline rds_ib_send_unmap_rdma into unmap_rm.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
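For context, the new control messages ride on an ordinary sendmsg(2), just like the existing RDMA cmsgs. Below is a minimal userspace sketch of issuing a compare-and-swap; the layout and field names of struct rds_atomic_args are an assumption (the struct lives in linux/rds.h, which this diff does not touch), while RDS_CMSG_ATOMIC_CSWP and RDS_CMSG_ATOMIC_FADD appear in the diff itself:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

/* Sketch only: struct rds_atomic_args field layout is assumed from the
 * uapi header and may differ at this point in history. */
static int send_atomic_cswp(int fd, struct sockaddr_in *dest,
			    struct rds_atomic_args *args)
{
	char ctl[CMSG_SPACE(sizeof(*args))];
	struct msghdr msg = {
		.msg_name	= dest,
		.msg_namelen	= sizeof(*dest),
		.msg_control	= ctl,
		.msg_controllen	= sizeof(ctl),
	};
	struct cmsghdr *cmsg;

	memset(ctl, 0, sizeof(ctl));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type  = RDS_CMSG_ATOMIC_CSWP;	/* or RDS_CMSG_ATOMIC_FADD */
	cmsg->cmsg_len   = CMSG_LEN(sizeof(*args));
	memcpy(CMSG_DATA(cmsg), args, sizeof(*args));

	return sendmsg(fd, &msg, 0);
}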
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--	net/rds/send.c	71
1 file changed, 68 insertions(+), 3 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index b751a8e77c41..f3f4e79274bf 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -73,6 +73,7 @@ void rds_send_reset(struct rds_connection *conn)
 	conn->c_xmit_hdr_off = 0;
 	conn->c_xmit_data_off = 0;
 	conn->c_xmit_rdma_sent = 0;
+	conn->c_xmit_atomic_sent = 0;
 
 	conn->c_map_queued = 0;
 
@@ -171,6 +172,7 @@ int rds_send_xmit(struct rds_connection *conn)
 			conn->c_xmit_hdr_off = 0;
 			conn->c_xmit_data_off = 0;
 			conn->c_xmit_rdma_sent = 0;
+			conn->c_xmit_atomic_sent = 0;
 
 			/* Release the reference to the previous message. */
 			rds_message_put(rm);
@@ -262,6 +264,17 @@ int rds_send_xmit(struct rds_connection *conn)
 			conn->c_xmit_rm = rm;
 		}
 
+
+		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
+			if (ret)
+				break;
+			conn->c_xmit_atomic_sent = 1;
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
+		}
+
 		/*
 		 * Try and send an rdma message.  Let's see if we can
 		 * keep this simple and require that the transport either
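The dispatch above pins down the shape of the new transport hook (the authoritative declaration is the xmit_atomic member added to struct rds_transport in net/rds/rds.h, outside this file's diff). A minimal stub inferred from the call site:

/* Inferred from the call site above: take the connection and the
 * message's atomic op, return 0 once the work is posted. A nonzero
 * return leaves c_xmit_atomic_sent clear, so rds_send_xmit() breaks
 * out of its loop and the op is retried on a later pass. */
static int example_xmit_atomic(struct rds_connection *conn,
			       struct rm_atomic_op *op)
{
	/* post the CSWP or FADD work request to the fabric here */
	return 0;
}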
@@ -443,6 +456,41 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 
 /*
+ * Just like above, except looks at atomic op
+ */
+void rds_atomic_send_complete(struct rds_message *rm, int status)
+{
+	struct rds_sock *rs = NULL;
+	struct rm_atomic_op *ao;
+	struct rds_notifier *notifier;
+
+	spin_lock(&rm->m_rs_lock);
+
+	ao = &rm->atomic;
+	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
+	    && ao->op_active && ao->op_notify && ao->op_notifier) {
+		notifier = ao->op_notifier;
+		rs = rm->m_rs;
+		sock_hold(rds_rs_to_sk(rs));
+
+		notifier->n_status = status;
+		spin_lock(&rs->rs_lock);
+		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
+		spin_unlock(&rs->rs_lock);
+
+		ao->op_notifier = NULL;
+	}
+
+	spin_unlock(&rm->m_rs_lock);
+
+	if (rs) {
+		rds_wake_sk_sleep(rs);
+		sock_put(rds_rs_to_sk(rs));
+	}
+}
+EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
+
+/*
  * This is the same as rds_rdma_send_complete except we
  * don't do any locking - we have all the ingredients (message,
  * socket, socket lock) and can just move the notifier.
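rds_atomic_send_complete() parks the notifier on rs_notify_queue, the same queue the RDMA completion path uses, so userspace presumably reaps atomic completions the way it reaps RDMA ones: as an RDS_CMSG_RDMA_STATUS control message on recvmsg(2). A sketch, assuming that reuse:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Sketch: drain completion notifications. Assumes atomic ops reuse the
 * RDMA notifier delivery (RDS_CMSG_RDMA_STATUS / struct rds_rdma_notify). */
static void drain_notifications(int fd, struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	if (recvmsg(fd, msg, MSG_DONTWAIT) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_RDS &&
		    cmsg->cmsg_type == RDS_CMSG_RDMA_STATUS) {
			struct rds_rdma_notify *n =
				(struct rds_rdma_notify *)CMSG_DATA(cmsg);

			fprintf(stderr, "op token %llu status %d\n",
				(unsigned long long)n->user_token,
				(int)n->status);
		}
	}
}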
@@ -788,6 +836,11 @@ static int rds_rm_size(struct msghdr *msg, int data_len)
 			/* these are valid but do no add any size */
 			break;
 
+		case RDS_CMSG_ATOMIC_CSWP:
+		case RDS_CMSG_ATOMIC_FADD:
+			size += sizeof(struct scatterlist);
+			break;
+
 		default:
 			return -EINVAL;
 		}
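The single struct scatterlist accounted here is presumably the slot for the user's 8-byte local buffer: a fetch-add or compare-swap hands back the old remote value, which has to land somewhere the HCA can DMA to. A sketch of the mapping step that would consume this slot (the op_sg field name is an assumption; the real work happens in rds_cmsg_atomic(), outside this file):

/* Sketch (assumption): map the user's 8-byte result buffer into the
 * scatterlist entry reserved by rds_rm_size() above. */
static void map_atomic_result(struct rm_atomic_op *op, struct page *page,
			      unsigned long offset)
{
	sg_init_table(op->op_sg, 1);
	sg_set_page(op->op_sg, page, 8, offset);
}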
@@ -813,7 +866,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 			continue;
 
 		/* As a side effect, RDMA_DEST and RDMA_MAP will set
-		 * rm->m_rdma_cookie and rm->m_rdma_mr.
+		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 		 */
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
@@ -829,6 +882,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 			if (!ret)
 				*allocated_mr = 1;
 			break;
+		case RDS_CMSG_ATOMIC_CSWP:
+		case RDS_CMSG_ATOMIC_FADD:
+			ret = rds_cmsg_atomic(rs, rm, cmsg);
+			break;
 
 		default:
 			return -EINVAL;
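rds_cmsg_atomic() itself lives in net/rds/rdma.c and is outside this file's diff; only its signature follows from the call above:

/* Signature inferred from the call site. The body (in net/rds/rdma.c)
 * would validate the cmsg payload, copy the rds_atomic_args from
 * userspace, and fill in rm->atomic -- a sketch of responsibilities,
 * not a claim about the actual implementation. */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);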
@@ -926,10 +983,18 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		goto out;
 
 	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
 	    !conn->c_trans->xmit_rdma) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
 			       &rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
+		if (printk_ratelimit())
+			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
+			       &rm->atomic, conn->c_trans->xmit_atomic);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
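From userspace, this guard mirrors the existing RDMA one: an atomic op sent over a transport that never filled in xmit_atomic (e.g. the TCP transport) now fails the sendmsg(2) cleanly instead of stalling the send queue. Caller-side sketch:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/* Sketch: a transport without xmit_atomic rejects the op with EOPNOTSUPP. */
static int send_atomic_checked(int fd, struct msghdr *msg)
{
	int ret = sendmsg(fd, msg, 0);

	if (ret < 0 && errno == EOPNOTSUPP)
		fprintf(stderr, "RDS transport lacks atomic op support\n");
	return ret;
}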