diff options
author | Andy Grover <andy.grover@oracle.com> | 2010-01-14 15:18:11 -0500 |
---|---|---|
committer | Andy Grover <andy.grover@oracle.com> | 2010-09-08 21:11:51 -0400 |
commit | da5a06cef5724737af4315715632f0a07dd5e116 (patch) | |
tree | fef68cae6cc0894247aeb6384708d58eb2c5fd8b /net/rds/ib_send.c | |
parent | 919ced4ce7d6ac62dd5be62d8993fe22a527d53a (diff) |
RDS: rewrite rds_ib_xmit
Now that the header always goes first, it is possible to
simplify rds_ib_xmit. Instead of having a path to handle 0-byte
dgrams and another path to handle >0, these can both be handled
in one path. This lets us eliminate rds_ib_xmit_populate_wr().
Rename "sent" to "bytes_sent", to differentiate it better from the
other variable named "send".
Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/ib_send.c')
-rw-r--r-- | net/rds/ib_send.c | 123 |
1 file changed, 45 insertions, 78 deletions
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 46026d9091f1..06c1d7e032d2 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -425,38 +425,6 @@ void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted) | |||
425 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 425 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
426 | } | 426 | } |
427 | 427 | ||
428 | static inline void | ||
429 | rds_ib_xmit_populate_wr(struct rds_ib_connection *ic, | ||
430 | struct rds_ib_send_work *send, unsigned int pos, | ||
431 | unsigned long buffer, unsigned int length, | ||
432 | int send_flags) | ||
433 | { | ||
434 | struct ib_sge *sge; | ||
435 | |||
436 | WARN_ON(pos != send - ic->i_sends); | ||
437 | |||
438 | send->s_wr.send_flags = send_flags; | ||
439 | send->s_wr.opcode = IB_WR_SEND; | ||
440 | send->s_wr.num_sge = 1; | ||
441 | send->s_wr.next = NULL; | ||
442 | send->s_queued = jiffies; | ||
443 | send->s_op = NULL; | ||
444 | |||
445 | sge = &send->s_sge[0]; | ||
446 | sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header)); | ||
447 | sge->length = sizeof(struct rds_header); | ||
448 | sge->lkey = ic->i_mr->lkey; | ||
449 | |||
450 | if (length != 0) { | ||
451 | send->s_wr.num_sge = 2; | ||
452 | |||
453 | sge = &send->s_sge[1]; | ||
454 | sge->addr = buffer; | ||
455 | sge->length = length; | ||
456 | sge->lkey = ic->i_mr->lkey; | ||
457 | } | ||
458 | } | ||
459 | |||
460 | /* | 428 | /* |
461 | * This can be called multiple times for a given message. The first time | 429 | * This can be called multiple times for a given message. The first time |
462 | * we see a message we map its scatterlist into the IB device so that | 430 | * we see a message we map its scatterlist into the IB device so that |
@@ -483,11 +451,11 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
483 | u32 pos; | 451 | u32 pos; |
484 | u32 i; | 452 | u32 i; |
485 | u32 work_alloc; | 453 | u32 work_alloc; |
486 | u32 credit_alloc; | 454 | u32 credit_alloc = 0; |
487 | u32 posted; | 455 | u32 posted; |
488 | u32 adv_credits = 0; | 456 | u32 adv_credits = 0; |
489 | int send_flags = 0; | 457 | int send_flags = 0; |
490 | int sent; | 458 | int bytes_sent = 0; |
491 | int ret; | 459 | int ret; |
492 | int flow_controlled = 0; | 460 | int flow_controlled = 0; |
493 | 461 | ||
@@ -515,7 +483,6 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
515 | goto out; | 483 | goto out; |
516 | } | 484 | } |
517 | 485 | ||
518 | credit_alloc = work_alloc; | ||
519 | if (ic->i_flowctl) { | 486 | if (ic->i_flowctl) { |
520 | credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT); | 487 | credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT); |
521 | adv_credits += posted; | 488 | adv_credits += posted; |
@@ -591,13 +558,6 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
591 | BUG_ON(adv_credits > 255); | 558 | BUG_ON(adv_credits > 255); |
592 | } | 559 | } |
593 | 560 | ||
594 | send = &ic->i_sends[pos]; | ||
595 | first = send; | ||
596 | prev = NULL; | ||
597 | scat = &rm->data.m_sg[sg]; | ||
598 | sent = 0; | ||
599 | i = 0; | ||
600 | |||
601 | /* Sometimes you want to put a fence between an RDMA | 561 | /* Sometimes you want to put a fence between an RDMA |
602 | * READ and the following SEND. | 562 | * READ and the following SEND. |
603 | * We could either do this all the time | 563 | * We could either do this all the time |
@@ -607,31 +567,45 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
607 | if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence) | 567 | if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence) |
608 | send_flags = IB_SEND_FENCE; | 568 | send_flags = IB_SEND_FENCE; |
609 | 569 | ||
610 | /* | 570 | /* Each frag gets a header. Msgs may be 0 bytes */ |
611 | * We could be copying the header into the unused tail of the page. | 571 | send = &ic->i_sends[pos]; |
612 | * That would need to be changed in the future when those pages might | 572 | first = send; |
613 | * be mapped userspace pages or page cache pages. So instead we always | 573 | prev = NULL; |
614 | * use a second sge and our long-lived ring of mapped headers. We send | 574 | scat = &rm->data.m_sg[sg]; |
615 | * the header after the data so that the data payload can be aligned on | 575 | i = 0; |
616 | * the receiver. | 576 | do { |
617 | */ | 577 | unsigned int len = 0; |
618 | 578 | ||
619 | /* handle a 0-len message */ | 579 | /* Set up the header */ |
620 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) { | 580 | send->s_wr.send_flags = send_flags; |
621 | rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags); | 581 | send->s_wr.opcode = IB_WR_SEND; |
622 | goto add_header; | 582 | send->s_wr.num_sge = 1; |
623 | } | 583 | send->s_wr.next = NULL; |
584 | send->s_queued = jiffies; | ||
585 | send->s_op = NULL; | ||
624 | 586 | ||
625 | /* if there's data reference it with a chain of work reqs */ | 587 | send->s_sge[0].addr = ic->i_send_hdrs_dma |
626 | for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) { | 588 | + (pos * sizeof(struct rds_header)); |
627 | unsigned int len; | 589 | send->s_sge[0].length = sizeof(struct rds_header); |
628 | 590 | ||
629 | send = &ic->i_sends[pos]; | 591 | memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header)); |
592 | |||
593 | /* Set up the data, if present */ | ||
594 | if (i < work_alloc | ||
595 | && scat != &rm->data.m_sg[rm->data.m_count]) { | ||
596 | len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off); | ||
597 | send->s_wr.num_sge = 2; | ||
630 | 598 | ||
631 | len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off); | 599 | send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off; |
632 | rds_ib_xmit_populate_wr(ic, send, pos, | 600 | send->s_sge[1].length = len; |
633 | ib_sg_dma_address(dev, scat) + off, len, | 601 | |
634 | send_flags); | 602 | bytes_sent += len; |
603 | off += len; | ||
604 | if (off == ib_sg_dma_len(dev, scat)) { | ||
605 | scat++; | ||
606 | off = 0; | ||
607 | } | ||
608 | } | ||
635 | 609 | ||
636 | /* | 610 | /* |
637 | * We want to delay signaling completions just enough to get | 611 | * We want to delay signaling completions just enough to get |
@@ -658,18 +632,6 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
658 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, | 632 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, |
659 | &send->s_wr, send->s_wr.num_sge, send->s_wr.next); | 633 | &send->s_wr, send->s_wr.num_sge, send->s_wr.next); |
660 | 634 | ||
661 | sent += len; | ||
662 | off += len; | ||
663 | if (off == ib_sg_dma_len(dev, scat)) { | ||
664 | scat++; | ||
665 | off = 0; | ||
666 | } | ||
667 | |||
668 | add_header: | ||
669 | /* Tack on the header after the data. The header SGE should already | ||
670 | * have been set up to point to the right header buffer. */ | ||
671 | memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header)); | ||
672 | |||
673 | if (adv_credits) { | 635 | if (adv_credits) { |
674 | struct rds_header *hdr = &ic->i_send_hdrs[pos]; | 636 | struct rds_header *hdr = &ic->i_send_hdrs[pos]; |
675 | 637 | ||
@@ -685,12 +647,16 @@ add_header: | |||
685 | prev = send; | 647 | prev = send; |
686 | 648 | ||
687 | pos = (pos + 1) % ic->i_send_ring.w_nr; | 649 | pos = (pos + 1) % ic->i_send_ring.w_nr; |
688 | } | 650 | send = &ic->i_sends[pos]; |
651 | i++; | ||
652 | |||
653 | } while (i < work_alloc | ||
654 | && scat != &rm->data.m_sg[rm->data.m_count]); | ||
689 | 655 | ||
690 | /* Account the RDS header in the number of bytes we sent, but just once. | 656 | /* Account the RDS header in the number of bytes we sent, but just once. |
691 | * The caller has no concept of fragmentation. */ | 657 | * The caller has no concept of fragmentation. */ |
692 | if (hdr_off == 0) | 658 | if (hdr_off == 0) |
693 | sent += sizeof(struct rds_header); | 659 | bytes_sent += sizeof(struct rds_header); |
694 | 660 | ||
695 | /* if we finished the message then send completion owns it */ | 661 | /* if we finished the message then send completion owns it */ |
696 | if (scat == &rm->data.m_sg[rm->data.m_count]) { | 662 | if (scat == &rm->data.m_sg[rm->data.m_count]) { |
@@ -699,6 +665,7 @@ add_header: | |||
699 | ic->i_rm = NULL; | 665 | ic->i_rm = NULL; |
700 | } | 666 | } |
701 | 667 | ||
668 | /* Put back wrs & credits we didn't use */ | ||
702 | if (i < work_alloc) { | 669 | if (i < work_alloc) { |
703 | rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); | 670 | rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); |
704 | work_alloc = i; | 671 | work_alloc = i; |
@@ -725,7 +692,7 @@ add_header: | |||
725 | goto out; | 692 | goto out; |
726 | } | 693 | } |
727 | 694 | ||
728 | ret = sent; | 695 | ret = bytes_sent; |
729 | out: | 696 | out: |
730 | BUG_ON(adv_credits); | 697 | BUG_ON(adv_credits); |
731 | return ret; | 698 | return ret; |