about summary refs log tree commit diff stats
path: root/net/rds/rdma.c
diff options
context:
space:
mode:
author: Andy Grover <andy.grover@oracle.com> 2010-01-12 17:33:38 -0500
committer: Andy Grover <andy.grover@oracle.com> 2010-09-08 21:11:41 -0400
commit15133f6e67d8d646d0744336b4daa3135452cb0d (patch)
treee5675d5a3ab240edc9a66af6b891dd75fa9eabae /net/rds/rdma.c
parenta63273d4992603979ddb181b6a8f07082839b39f (diff)
RDS: Implement atomic operations
Implement a CMSG-based interface to do FADD and CSWP ops.
Alter send routines to handle atomic ops.
Add atomic counters to stats.
Add xmit_atomic() to struct rds_transport.
Inline rds_ib_send_unmap_rdma into unmap_rm.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/rdma.c')
-rw-r--r--net/rds/rdma.c73
1 file changed, 73 insertions, 0 deletions
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4fda33045598..a7019df38c70 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -719,3 +719,76 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
719 719
720 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr); 720 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr);
721} 721}
722
723/*
724 * Fill in rds_message for an atomic request.
725 */
726int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
727 struct cmsghdr *cmsg)
728{
729 struct page *page = NULL;
730 struct rds_atomic_args *args;
731 int ret = 0;
732
733 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
734 || rm->atomic.op_active)
735 return -EINVAL;
736
737 args = CMSG_DATA(cmsg);
738
739 if (cmsg->cmsg_type == RDS_CMSG_ATOMIC_CSWP) {
740 rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
741 rm->atomic.op_swap_add = args->cswp.swap;
742 rm->atomic.op_compare = args->cswp.compare;
743 } else {
744 rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
745 rm->atomic.op_swap_add = args->fadd.add;
746 }
747
748 rm->m_rdma_cookie = args->cookie;
749 rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
750 rm->atomic.op_recverr = rs->rs_recverr;
751 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
752
753 /* verify 8 byte-aligned */
754 if (args->local_addr & 0x7) {
755 ret = -EFAULT;
756 goto err;
757 }
758
759 ret = rds_pin_pages(args->local_addr, 1, &page, 1);
760 if (ret != 1)
761 goto err;
762 ret = 0;
763
764 sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
765
766 if (rm->atomic.op_notify || rm->atomic.op_recverr) {
767 /* We allocate an uninitialized notifier here, because
768 * we don't want to do that in the completion handler. We
769 * would have to use GFP_ATOMIC there, and don't want to deal
770 * with failed allocations.
771 */
772 rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
773 if (!rm->atomic.op_notifier) {
774 ret = -ENOMEM;
775 goto err;
776 }
777
778 rm->atomic.op_notifier->n_user_token = args->user_token;
779 rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
780 }
781
782 rm->atomic.op_rkey = rds_rdma_cookie_key(rm->m_rdma_cookie);
783 rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
784
785 rm->atomic.op_active = 1;
786
787 return ret;
788err:
789 if (page)
790 put_page(page);
791 kfree(rm->atomic.op_notifier);
792
793 return ret;
794}