author	Andy Grover <andy.grover@oracle.com>	2010-01-12 15:15:02 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:33 -0400
commit	e779137aa76d38d5c33a98ed887092ae4e4f016f (patch)
tree	af0a34f9334bd11ca507d4e63a963c561ff981ae
parent	8690bfa17aea4c42da1bcf90a7af93d161eca624 (diff)
RDS: break out rdma and data ops into nested structs in rds_message
Clearly separate rdma-related variables in rm from data-related ones.
This is in anticipation of adding atomic support.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
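At a glance, the six per-message fields regroup into two nested structs and
every call site changes mechanically. A before/after sketch of the access
pattern (summarized from the rds.h hunk below; not part of the original
patch text):

	/* before                 after                          */
	rm->m_rdma_op             rm->rdma.m_rdma_op
	rm->m_rdma_mr             rm->rdma.m_rdma_mr
	rm->m_nents               rm->data.m_nents
	rm->m_count               rm->data.m_count
	rm->m_sg                  rm->data.m_sg
	rm->m_rdma_cookie         rm->m_rdma_cookie  /* unchanged */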
-rw-r--r--	net/rds/ib_send.c	44
-rw-r--r--	net/rds/iw_send.c	38
-rw-r--r--	net/rds/message.c	30
-rw-r--r--	net/rds/rdma.c	9
-rw-r--r--	net/rds/rds.h	16
-rw-r--r--	net/rds/send.c	30
-rw-r--r--	net/rds/tcp_send.c	14
7 files changed, 96 insertions, 85 deletions
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 0b0090d2ee01..53750203c9e5 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -83,11 +83,11 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->m_sg, rm->m_nents,
+			rm->data.m_sg, rm->data.m_nents,
 			DMA_TO_DEVICE);
 
-	if (rm->m_rdma_op) {
-		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
+	if (rm->rdma.m_rdma_op) {
+		rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -111,10 +111,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_rdma_complete(rm, wc_status);
 
-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		if (rm->rdma.m_rdma_op->r_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -244,8 +244,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 			rm = rds_send_get_message(conn, send->s_op);
 			if (rm) {
-				if (rm->m_rdma_op)
-					rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
+				if (rm->rdma.m_rdma_op)
+					rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
 				rds_ib_send_rdma_complete(rm, wc.status);
 				rds_message_put(rm);
 			}
@@ -532,18 +532,20 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		rm->m_inc.i_hdr.h_flags,
 		be32_to_cpu(rm->m_inc.i_hdr.h_len));
 	*/
-	if (rm->m_nents) {
-		rm->m_count = ib_dma_map_sg(dev,
-			rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-		if (rm->m_count == 0) {
+	if (rm->data.m_nents) {
+		rm->data.m_count = ib_dma_map_sg(dev,
+						 rm->data.m_sg,
+						 rm->data.m_nents,
+						 DMA_TO_DEVICE);
+		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
+		if (rm->data.m_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 	} else {
-		rm->m_count = 0;
+		rm->data.m_count = 0;
 	}
 
 	ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
@@ -559,10 +561,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->m_rdma_op) {
+	if (rm->rdma.m_rdma_op) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op->r_key);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -590,7 +592,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->m_sg[sg];
+	scat = &rm->data.m_sg[sg];
 	sent = 0;
 	i = 0;
 
@@ -600,7 +602,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.m_rdma_op && rm->rdma.m_rdma_op->r_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -619,7 +621,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
 		unsigned int len;
 
 		send = &ic->i_sends[pos];
@@ -697,7 +699,7 @@ add_header:
 	sent += sizeof(struct rds_header);
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
+	if (scat == &rm->data.m_sg[rm->data.m_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index dced532f9cfb..c187e8fdeab1 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -83,11 +83,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->m_sg, rm->m_nents,
+			rm->data.m_sg, rm->data.m_nents,
 			DMA_TO_DEVICE);
 
-	if (rm->m_rdma_op) {
-		rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+	if (rm->rdma.m_rdma_op) {
+		rds_iw_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -111,10 +111,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
 
-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		if (rm->rdma.m_rdma_op->r_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -563,18 +563,20 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 		rm->m_inc.i_hdr.h_flags,
 		be32_to_cpu(rm->m_inc.i_hdr.h_len));
 	*/
-	if (rm->m_nents) {
-		rm->m_count = ib_dma_map_sg(dev,
-			rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-		if (rm->m_count == 0) {
+	if (rm->data.m_nents) {
+		rm->data.m_count = ib_dma_map_sg(dev,
+						 rm->data.m_sg,
+						 rm->data.m_nents,
+						 DMA_TO_DEVICE);
+		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
+		if (rm->data.m_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 	} else {
-		rm->m_count = 0;
+		rm->data.m_count = 0;
 	}
 
 	ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +592,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->m_rdma_op) {
+	if (rm->rdma.m_rdma_op) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op->r_key);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -621,7 +623,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->m_sg[sg];
+	scat = &rm->data.m_sg[sg];
 	sent = 0;
 	i = 0;
 
@@ -631,7 +633,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.m_rdma_op && rm->rdma.m_rdma_op->r_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -650,7 +652,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
 		unsigned int len;
 
 		send = &ic->i_sends[pos];
@@ -728,7 +730,7 @@ add_header:
 	sent += sizeof(struct rds_header);
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
+	if (scat == &rm->data.m_sg[rm->data.m_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
diff --git a/net/rds/message.c b/net/rds/message.c
index 809656c2b25c..4421d160b1a4 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -63,17 +63,17 @@ static void rds_message_purge(struct rds_message *rm)
 	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
 		return;
 
-	for (i = 0; i < rm->m_nents; i++) {
-		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
+	for (i = 0; i < rm->data.m_nents; i++) {
+		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.m_sg[i]));
 		/* XXX will have to put_page for page refs */
-		__free_page(sg_page(&rm->m_sg[i]));
+		__free_page(sg_page(&rm->data.m_sg[i]));
 	}
-	rm->m_nents = 0;
+	rm->data.m_nents = 0;
 
-	if (rm->m_rdma_op)
-		rds_rdma_free_op(rm->m_rdma_op);
-	if (rm->m_rdma_mr)
-		rds_mr_put(rm->m_rdma_mr);
+	if (rm->rdma.m_rdma_op)
+		rds_rdma_free_op(rm->rdma.m_rdma_op);
+	if (rm->rdma.m_rdma_mr)
+		rds_mr_put(rm->rdma.m_rdma_mr);
 }
 
 void rds_message_inc_purge(struct rds_incoming *inc)
@@ -224,7 +224,7 @@ struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
 		goto out;
 
 	if (nents)
-		sg_init_table(rm->m_sg, nents);
+		sg_init_table(rm->data.m_sg, nents);
 	atomic_set(&rm->m_refcount, 1);
 	INIT_LIST_HEAD(&rm->m_sock_item);
 	INIT_LIST_HEAD(&rm->m_conn_item);
@@ -245,10 +245,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 
 	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
-	rm->m_nents = ceil(total_len, PAGE_SIZE);
+	rm->data.m_nents = ceil(total_len, PAGE_SIZE);
 
-	for (i = 0; i < rm->m_nents; ++i) {
-		sg_set_page(&rm->m_sg[i],
+	for (i = 0; i < rm->data.m_nents; ++i) {
+		sg_set_page(&rm->data.m_sg[i],
 				virt_to_page(page_addrs[i]),
 				PAGE_SIZE, 0);
 	}
@@ -278,7 +278,7 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
 	/*
 	 * now allocate and copy in the data payload.
 	 */
-	sg = rm->m_sg;
+	sg = rm->data.m_sg;
 	iov = first_iov;
 	iov_off = 0;
 	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
@@ -289,7 +289,7 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
 						GFP_HIGHUSER);
 			if (ret)
 				goto out;
-			rm->m_nents++;
+			rm->data.m_nents++;
 			sg_off = 0;
 		}
 
@@ -348,7 +348,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
 
 	iov = first_iov;
 	iov_off = 0;
-	sg = rm->m_sg;
+	sg = rm->data.m_sg;
 	vec_off = 0;
 	copied = 0;
 
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index dee698b979af..24274bb9e329 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -643,14 +643,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	struct rds_rdma_op *op;
 
 	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
-	    rm->m_rdma_op)
+	    rm->rdma.m_rdma_op)
 		return -EINVAL;
 
 	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
 	if (IS_ERR(op))
 		return PTR_ERR(op);
 	rds_stats_inc(s_send_rdma);
-	rm->m_rdma_op = op;
+	rm->rdma.m_rdma_op = op;
 	return 0;
 }
 
@@ -679,6 +679,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 	 */
 	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
 
+
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 	if (!mr)
@@ -689,7 +690,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->m_rdma_mr = mr;
+		rm->rdma.m_rdma_mr = mr;
 	}
 	return err;
 }
@@ -707,5 +708,5 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr);
 }
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 1d3eef67137f..07a750b3fb31 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -259,12 +259,18 @@ struct rds_message {
 	 */
 	spinlock_t		m_rs_lock;
 	struct rds_sock		*m_rs;
-	struct rds_rdma_op	*m_rdma_op;
 	rds_rdma_cookie_t	m_rdma_cookie;
-	struct rds_mr		*m_rdma_mr;
-	unsigned int		m_nents;
-	unsigned int		m_count;
-	struct scatterlist	m_sg[0];
+	struct {
+		struct {
+			struct rds_rdma_op	*m_rdma_op;
+			struct rds_mr		*m_rdma_mr;
+		} rdma;
+		struct {
+			unsigned int		m_nents;
+			unsigned int		m_count;
+			struct scatterlist	m_sg[0];
+		} data;
+	};
 };
 
 /*
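Note the ordering in the new layout: the data group comes last because m_sg is a
zero-length array, so the scatterlist entries live in memory allocated past the
end of struct rds_message. A minimal sketch of the usual kernel idiom for sizing
such an allocation (the body of rds_message_alloc() is not part of this diff, so
the kzalloc sizing below is an assumption based on common practice, not the
commit's actual code; only the sg_init_table() and atomic_set() calls appear in
the message.c hunk above):

	/* Sketch only: allocate room for nents scatterlist entries
	 * trailing the fixed part of the struct. */
	static struct rds_message *rds_message_alloc_sketch(unsigned int nents,
							    gfp_t gfp)
	{
		struct rds_message *rm;

		rm = kzalloc(sizeof(*rm) + nents * sizeof(struct scatterlist), gfp);
		if (!rm)
			return NULL;

		if (nents)
			sg_init_table(rm->data.m_sg, nents);
		atomic_set(&rm->m_refcount, 1);
		return rm;
	}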
diff --git a/net/rds/send.c b/net/rds/send.c
index 817997daf785..19dfd025498e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -166,7 +166,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	rm = conn->c_xmit_rm;
 	if (rm &&
 	    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
-	    conn->c_xmit_sg == rm->m_nents) {
+	    conn->c_xmit_sg == rm->data.m_nents) {
 		conn->c_xmit_rm = NULL;
 		conn->c_xmit_sg = 0;
 		conn->c_xmit_hdr_off = 0;
@@ -236,7 +236,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * connection.
 		 * Therefore, we never retransmit messages with RDMA ops.
 		 */
-		if (rm->m_rdma_op &&
+		if (rm->rdma.m_rdma_op &&
 		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 			spin_lock_irqsave(&conn->c_lock, flags);
 			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
@@ -268,8 +268,8 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * keep this simple and require that the transport either
 		 * send the whole rdma or none of it.
 		 */
-		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
-			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
+		if (rm->rdma.m_rdma_op && !conn->c_xmit_rdma_sent) {
+			ret = conn->c_trans->xmit_rdma(conn, rm->rdma.m_rdma_op);
 			if (ret)
 				break;
 			conn->c_xmit_rdma_sent = 1;
@@ -279,7 +279,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		}
 
 		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
-		    conn->c_xmit_sg < rm->m_nents) {
+		    conn->c_xmit_sg < rm->data.m_nents) {
 			ret = conn->c_trans->xmit(conn, rm,
 						  conn->c_xmit_hdr_off,
 						  conn->c_xmit_sg,
@@ -295,7 +295,7 @@ int rds_send_xmit(struct rds_connection *conn)
 				ret -= tmp;
 			}
 
-			sg = &rm->m_sg[conn->c_xmit_sg];
+			sg = &rm->data.m_sg[conn->c_xmit_sg];
 			while (ret) {
 				tmp = min_t(int, ret, sg->length -
 						      conn->c_xmit_data_off);
@@ -306,7 +306,7 @@ int rds_send_xmit(struct rds_connection *conn)
 					sg++;
 					conn->c_xmit_sg++;
 					BUG_ON(ret != 0 &&
-					       conn->c_xmit_sg == rm->m_nents);
+					       conn->c_xmit_sg == rm->data.m_nents);
 				}
 			}
 		}
@@ -419,7 +419,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	ro = rm->m_rdma_op;
+	ro = rm->rdma.m_rdma_op;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 	    ro && ro->r_notify && ro->r_notifier) {
 		notifier = ro->r_notifier;
@@ -453,7 +453,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
 {
 	struct rds_rdma_op *ro;
 
-	ro = rm->m_rdma_op;
+	ro = rm->rdma.m_rdma_op;
 	if (ro && ro->r_notify && ro->r_notifier) {
 		ro->r_notifier->n_status = status;
 		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
@@ -477,7 +477,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	spin_lock_irqsave(&conn->c_lock, flags);
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
-		if (rm->m_rdma_op == op) {
+		if (rm->rdma.m_rdma_op == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			goto out;
@@ -485,7 +485,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	}
 
 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
-		if (rm->m_rdma_op == op) {
+		if (rm->rdma.m_rdma_op == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			break;
@@ -545,7 +545,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		spin_lock(&rs->rs_lock);
 
 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
-			struct rds_rdma_op *ro = rm->m_rdma_op;
+			struct rds_rdma_op *ro = rm->rdma.m_rdma_op;
 			struct rds_notifier *notifier;
 
 			list_del_init(&rm->m_sock_item);
@@ -557,7 +557,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 					      &rs->rs_notify_queue);
 				if (!notifier->n_status)
 					notifier->n_status = status;
-				rm->m_rdma_op->r_notifier = NULL;
+				rm->rdma.m_rdma_op->r_notifier = NULL;
 			}
 			was_on_sock = 1;
 			rm->m_rs = NULL;
@@ -874,11 +874,11 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	if (ret)
 		goto out;
 
-	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
+	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op) &&
 	    !conn->c_trans->xmit_rdma) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
-			       rm->m_rdma_op, conn->c_trans->xmit_rdma);
+			       rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index e5f6ccef79ef..d63aa35ac673 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -166,21 +166,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 		goto out;
 	}
 
-	while (sg < rm->m_nents) {
+	while (sg < rm->data.m_nents) {
 		ret = tc->t_sock->ops->sendpage(tc->t_sock,
-						sg_page(&rm->m_sg[sg]),
-						rm->m_sg[sg].offset + off,
-						rm->m_sg[sg].length - off,
+						sg_page(&rm->data.m_sg[sg]),
+						rm->data.m_sg[sg].offset + off,
+						rm->data.m_sg[sg].length - off,
 						MSG_DONTWAIT|MSG_NOSIGNAL);
-		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
-			 rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
+		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.m_sg[sg]),
+			 rm->data.m_sg[sg].offset + off, rm->data.m_sg[sg].length - off,
 			 ret);
 		if (ret <= 0)
 			break;
 
 		off += ret;
 		done += ret;
-		if (off == rm->m_sg[sg].length) {
+		if (off == rm->data.m_sg[sg].length) {
 			off = 0;
 			sg++;
 		}