| author | Andy Grover <andy.grover@oracle.com> | 2010-01-12 15:15:02 -0500 |
|---|---|---|
| committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:11:33 -0400 |
| commit | e779137aa76d38d5c33a98ed887092ae4e4f016f (patch) | |
| tree | af0a34f9334bd11ca507d4e63a963c561ff981ae /net/rds/ib_send.c | |
| parent | 8690bfa17aea4c42da1bcf90a7af93d161eca624 (diff) | |
RDS: break out rdma and data ops into nested structs in rds_message
Clearly separate rdma-related variables in rm from data-related ones.
This is in anticipation of adding atomic support.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
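
The shape of the change is easiest to see in the struct itself. Below is a minimal sketch inferred from the member accesses in the diff that follows; the full definition lives in net/rds/rds.h and carries more fields than shown here:

```c
/*
 * Sketch only: field set inferred from the hunks in this diff, not the
 * complete struct rds_message from net/rds/rds.h. Types are
 * forward-declared just to keep the sketch self-contained.
 */
struct rds_rdma_op;
struct scatterlist;

struct rds_message {
	/* ... refcount, flags, m_inc, etc. unchanged ... */
	struct {
		struct rds_rdma_op *m_rdma_op;	/* was rm->m_rdma_op */
	} rdma;
	struct {
		unsigned int	    m_nents;	/* was rm->m_nents */
		unsigned int	    m_count;	/* was rm->m_count */
		struct scatterlist *m_sg;	/* was rm->m_sg */
	} data;
};
```

Call sites change mechanically: every `rm->m_foo` becomes `rm->data.m_foo` or `rm->rdma.m_foo` depending on which operation the field belongs to, which is exactly what the hunks below do.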
Diffstat (limited to 'net/rds/ib_send.c')
-rw-r--r-- | net/rds/ib_send.c | 44
1 file changed, 23 insertions(+), 21 deletions(-)
```diff
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 0b0090d2ee01..53750203c9e5 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -83,11 +83,11 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-		     rm->m_sg, rm->m_nents,
+		     rm->data.m_sg, rm->data.m_nents,
 		     DMA_TO_DEVICE);
 
-	if (rm->m_rdma_op) {
-		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
+	if (rm->rdma.m_rdma_op) {
+		rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -111,10 +111,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_rdma_complete(rm, wc_status);
 
-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		if (rm->rdma.m_rdma_op->r_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -244,8 +244,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 			rm = rds_send_get_message(conn, send->s_op);
 			if (rm) {
-				if (rm->m_rdma_op)
-					rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
+				if (rm->rdma.m_rdma_op)
+					rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
 				rds_ib_send_rdma_complete(rm, wc.status);
 				rds_message_put(rm);
 			}
@@ -532,18 +532,20 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	       rm->m_inc.i_hdr.h_flags,
 	       be32_to_cpu(rm->m_inc.i_hdr.h_len));
 	*/
-	if (rm->m_nents) {
-		rm->m_count = ib_dma_map_sg(dev,
-			 rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-		if (rm->m_count == 0) {
+	if (rm->data.m_nents) {
+		rm->data.m_count = ib_dma_map_sg(dev,
+						 rm->data.m_sg,
+						 rm->data.m_nents,
+						 DMA_TO_DEVICE);
+		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
+		if (rm->data.m_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 	} else {
-		rm->m_count = 0;
+		rm->data.m_count = 0;
 	}
 
 	ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
```
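
The hunk above is the one place the patch is more than a rename: the ib_dma_map_sg() call is reflowed to one argument per line as it picks up the data.* names. Worth noting in passing: ib_dma_map_sg() returns the number of entries it mapped, and a return of 0 is the failure case, which is why the code turns it into -ENOMEM. A minimal standalone sketch of that pattern (the helper name is invented for illustration):

```c
#include <rdma/ib_verbs.h>

/*
 * Illustrative helper, not part of the patch: map a scatterlist for
 * transmit and treat a zero return from ib_dma_map_sg() as failure,
 * mirroring the error path in rds_ib_xmit() above.
 */
static int map_for_send(struct ib_device *dev, struct scatterlist *sg,
			unsigned int nents, unsigned int *count)
{
	*count = ib_dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (*count == 0)
		return -ENOMEM;	/* caller unwinds its ring allocation */
	return 0;
}
```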
```diff
@@ -559,10 +561,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->m_rdma_op) {
+	if (rm->rdma.m_rdma_op) {
 		struct rds_ext_header_rdma ext_hdr;
 
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op->r_key);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -590,7 +592,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->m_sg[sg];
+	scat = &rm->data.m_sg[sg];
 	sent = 0;
 	i = 0;
 
@@ -600,7 +602,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.m_rdma_op && rm->rdma.m_rdma_op->r_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -619,7 +621,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 
 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
 		unsigned int len;
 
 		send = &ic->i_sends[pos];
@@ -697,7 +699,7 @@ add_header:
 	sent += sizeof(struct rds_header);
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
+	if (scat == &rm->data.m_sg[rm->data.m_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
```
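
The commit message's "anticipation of adding atomic support" is the point of the grouping: per-operation state now has an obvious place to grow. A purely hypothetical illustration of where a third sibling struct could slot in; no atomic fields exist in this commit, and the names below are invented:

```c
struct rds_rdma_op;
struct scatterlist;

/* Hypothetical future layout, not from this patch. */
struct rds_message_sketch {
	struct {
		struct rds_rdma_op *m_rdma_op;
	} rdma;
	struct {
		unsigned int	    m_nents;
		struct scatterlist *m_sg;
	} data;
	struct {
		unsigned long long  op_compare;	/* invented field */
		unsigned long long  op_swap;	/* invented field */
	} atomic;
};
```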