author     Ben Hutchings <bhutchings@solarflare.com>    2008-09-01 07:47:02 -0400
committer  Jeff Garzik <jgarzik@redhat.com>             2008-09-03 09:53:45 -0400
commit     23d9e60b1ddc67ffedd77161ecff4895708088a4
tree       09fbd5497ba3b1234293c4f2790ad4f03f01e448     /drivers/net/sfc
parent     28506563e22a3ec7cf86e5acd853af8e68fe148b
sfc: Cleaned up struct tso_state fields
Squashed nested structures.
Renamed remaining_len to out_len, ifc.len to in_len, header_length to
header_len.
Moved ipv4_id into the group of output variables where it belongs.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/sfc')
 -rw-r--r--  drivers/net/sfc/tx.c  |  161
 1 file changed, 75 insertions(+), 86 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 51429b6a4dee..550856fab16c 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -540,46 +540,37 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 
 /**
  * struct tso_state - TSO state for an SKB
- * @remaining_len: Bytes of data we've yet to segment
+ * @out_len: Remaining length in current segment
  * @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
  * @packet_space: Remaining space in current packet
- * @ifc: Input fragment cursor.
- *	Where we are in the current fragment of the incoming SKB.  These
- *	values get updated in place when we split a fragment over
- *	multiple packets.
- * @p: Parameters.
- *	These values are set once at the start of the TSO send and do
- *	not get changed as the routine progresses.
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @unmap_single: DMA single vs page mapping flag
+ * @header_len: Number of bytes of header
+ * @full_packet_size: Number of bytes to put in each outgoing segment
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
  */
 struct tso_state {
-	unsigned remaining_len;
+	/* Output position */
+	unsigned out_len;
 	unsigned seqnum;
+	unsigned ipv4_id;
 	unsigned packet_space;
 
-	struct {
-		/* DMA address of current position */
-		dma_addr_t dma_addr;
-		/* Remaining length */
-		unsigned int len;
-		/* DMA address and length of the whole fragment */
-		unsigned int unmap_len;
-		dma_addr_t unmap_addr;
-		bool unmap_single;
-	} ifc;
-
-	struct {
-		/* The number of bytes of header */
-		unsigned int header_length;
-
-		/* The number of bytes to put in each outgoing segment. */
-		int full_packet_size;
-
-		/* Current IPv4 ID, host endian. */
-		unsigned ipv4_id;
-	} p;
+	/* Input position */
+	dma_addr_t dma_addr;
+	unsigned in_len;
+	unsigned unmap_len;
+	dma_addr_t unmap_addr;
+	bool unmap_single;
+
+	unsigned header_len;
+	int full_packet_size;
 };
 
 
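For quick reference while reading the hunks below, here is the complete old-name to new-name mapping in one place, written as a compilable userspace mock. This is an illustration, not the driver's header: dma_addr_t is stubbed as a 64-bit integer, and the comments record the pre-patch names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;    /* stub for the kernel type */

struct tso_state {
	/* Output position */
	unsigned out_len;       /* was: remaining_len */
	unsigned seqnum;
	unsigned ipv4_id;       /* was: p.ipv4_id */
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;    /* was: ifc.dma_addr */
	unsigned in_len;        /* was: ifc.len */
	unsigned unmap_len;     /* was: ifc.unmap_len */
	dma_addr_t unmap_addr;  /* was: ifc.unmap_addr */
	bool unmap_single;      /* was: ifc.unmap_single */

	unsigned header_len;    /* was: p.header_length */
	int full_packet_size;   /* was: p.full_packet_size */
};

int main(void)
{
	printf("sizeof(struct tso_state) = %zu\n", sizeof(struct tso_state));
	return 0;
}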
@@ -840,35 +831,34 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
 	/* All ethernet/IP/TCP headers combined size is TCP header size
 	 * plus offset of TCP header relative to start of packet.
 	 */
-	st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
-			       + PTR_DIFF(tcp_hdr(skb), skb->data));
-	st->p.full_packet_size = (st->p.header_length
-				  + skb_shinfo(skb)->gso_size);
+	st->header_len = ((tcp_hdr(skb)->doff << 2u)
+			  + PTR_DIFF(tcp_hdr(skb), skb->data));
+	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
 
-	st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+	st->ipv4_id = ntohs(ip_hdr(skb)->id);
 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-	st->packet_space = st->p.full_packet_size;
-	st->remaining_len = skb->len - st->p.header_length;
-	st->ifc.unmap_len = 0;
-	st->ifc.unmap_single = false;
+	st->packet_space = st->full_packet_size;
+	st->out_len = skb->len - st->header_len;
+	st->unmap_len = 0;
+	st->unmap_single = false;
 }
 
 static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 				   skb_frag_t *frag)
 {
-	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, frag->page,
-					  frag->page_offset, frag->size,
-					  PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
-		st->ifc.unmap_single = false;
-		st->ifc.unmap_len = frag->size;
-		st->ifc.len = frag->size;
-		st->ifc.dma_addr = st->ifc.unmap_addr;
+	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
+				      frag->page_offset, frag->size,
+				      PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+		st->unmap_single = false;
+		st->unmap_len = frag->size;
+		st->in_len = frag->size;
+		st->dma_addr = st->unmap_addr;
 		return 0;
 	}
 	return -ENOMEM;
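The header arithmetic in tso_start() is easy to sanity-check in isolation. A standalone sketch with invented sizes (14-byte Ethernet header, 20-byte IPv4 header, 20-byte TCP header, gso_size of 1460, 10,000-byte payload):

#include <stdio.h>

int main(void)
{
	unsigned eth_hdr = 14, ip_hdr = 20, tcp_doff_words = 5; /* doff is in 32-bit words */
	unsigned gso_size = 1460;
	unsigned skb_len = eth_hdr + ip_hdr + tcp_doff_words * 4 + 10000;

	/* header_len = TCP header size + offset of TCP header in the packet */
	unsigned header_len = (tcp_doff_words << 2u) + (eth_hdr + ip_hdr);
	unsigned full_packet_size = header_len + gso_size;
	unsigned out_len = skb_len - header_len;

	printf("header_len=%u full_packet_size=%u out_len=%u\n",
	       header_len, full_packet_size, out_len);
	/* -> header_len=54 full_packet_size=1514 out_len=10000 */
	return 0;
}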
@@ -878,16 +868,16 @@ static inline int
 tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 		      const struct sk_buff *skb)
 {
-	int hl = st->p.header_length;
+	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;
 
-	st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-					    len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
-		st->ifc.unmap_single = true;
-		st->ifc.unmap_len = len;
-		st->ifc.len = len;
-		st->ifc.dma_addr = st->ifc.unmap_addr;
+	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
+					len, PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+		st->unmap_single = true;
+		st->unmap_len = len;
+		st->in_len = len;
+		st->dma_addr = st->unmap_addr;
 		return 0;
 	}
 	return -ENOMEM;
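Both helpers above establish the same input-cursor invariant: unmap_addr/unmap_len remember the whole mapping for later teardown, while dma_addr/in_len start as copies and are consumed as data is queued. A self-contained userspace sketch of that invariant (the helper name cursor_set() is invented, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;    /* stand-in for the kernel type */

struct cursor {
	dma_addr_t dma_addr, unmap_addr;
	unsigned in_len, unmap_len;
	bool unmap_single;
};

static void cursor_set(struct cursor *c, dma_addr_t mapped, unsigned len,
		       bool single)
{
	c->unmap_addr = mapped;   /* what to hand back to pci_unmap_*() */
	c->unmap_len = len;       /* full mapped length, stays fixed */
	c->unmap_single = single; /* true: head fragment, false: page fragment */
	c->dma_addr = mapped;     /* walking copy, advances as data is queued */
	c->in_len = len;          /* walking copy, shrinks as data is queued */
}

int main(void)
{
	struct cursor c;
	cursor_set(&c, 0x1000, 4096, false); /* invented address and length */
	return 0;
}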
@@ -911,38 +901,38 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	struct efx_tx_buffer *buffer;
 	int n, end_of_packet, rc;
 
-	if (st->ifc.len == 0)
+	if (st->in_len == 0)
 		return 0;
 	if (st->packet_space == 0)
 		return 0;
 
-	EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
 
-	n = min(st->ifc.len, st->packet_space);
+	n = min(st->in_len, st->packet_space);
 
 	st->packet_space -= n;
-	st->remaining_len -= n;
-	st->ifc.len -= n;
+	st->out_len -= n;
+	st->in_len -= n;
 
-	rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, &buffer);
+	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 	if (likely(rc == 0)) {
-		if (st->remaining_len == 0)
+		if (st->out_len == 0)
 			/* Transfer ownership of the skb */
 			buffer->skb = skb;
 
-		end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+		end_of_packet = st->out_len == 0 || st->packet_space == 0;
 		buffer->continuation = !end_of_packet;
 
-		if (st->ifc.len == 0) {
+		if (st->in_len == 0) {
 			/* Transfer ownership of the pci mapping */
-			buffer->unmap_len = st->ifc.unmap_len;
-			buffer->unmap_single = st->ifc.unmap_single;
-			st->ifc.unmap_len = 0;
+			buffer->unmap_len = st->unmap_len;
+			buffer->unmap_single = st->unmap_single;
+			st->unmap_len = 0;
 		}
 	}
 
-	st->ifc.dma_addr += n;
+	st->dma_addr += n;
 	return rc;
 }
 
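With the renames, the bookkeeping in tso_fill_packet_with_fragment() reads as plain arithmetic: take n = min(in_len, packet_space), charge it to all three counters, and advance dma_addr. A standalone simulation with invented sizes, no DMA and no hardware (the fragment-refill line at the bottom is a simplification of what the real caller does):

#include <stdio.h>

int main(void)
{
	unsigned in_len = 4096, out_len = 4096, packet_space = 1460;
	unsigned gso_size = 1460, dma_addr = 0;

	while (out_len) {
		unsigned n = in_len < packet_space ? in_len : packet_space;

		packet_space -= n;	/* room left in this segment */
		out_len -= n;		/* payload left in the whole skb */
		in_len -= n;		/* data left in this fragment */
		dma_addr += n;		/* cursor walks forward */
		printf("queued %u bytes at offset %u%s\n", n, dma_addr - n,
		       (packet_space == 0 || out_len == 0) ?
		       " (end of packet)" : "");
		if (packet_space == 0)
			packet_space = gso_size; /* tso_start_new_packet() */
		if (in_len == 0)
			in_len = out_len; /* pretend next fragment holds the rest */
	}
	return 0;
}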
@@ -967,7 +957,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 	u8 *header;
 
 	/* Allocate a DMA-mapped header buffer. */
-	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
 		if (tx_queue->tso_headers_free == NULL) {
 			if (efx_tsoh_block_alloc(tx_queue))
 				return -1;
@@ -978,7 +968,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 		tsoh->unmap_len = 0;
 	} else {
 		tx_queue->tso_long_headers++;
-		tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
 		if (unlikely(!tsoh))
 			return -1;
 	}
@@ -988,33 +978,32 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
 
 	/* Copy and update the headers. */
-	memcpy(header, skb->data, st->p.header_length);
+	memcpy(header, skb->data, st->header_len);
 
 	tsoh_th->seq = htonl(st->seqnum);
 	st->seqnum += skb_shinfo(skb)->gso_size;
-	if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+	if (st->out_len > skb_shinfo(skb)->gso_size) {
 		/* This packet will not finish the TSO burst. */
-		ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
 		tsoh_th->fin = 0;
 		tsoh_th->psh = 0;
 	} else {
 		/* This packet will be the last in the TSO burst. */
-		ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
-			     + st->remaining_len);
+		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 	}
 	tsoh_iph->tot_len = htons(ip_length);
 
 	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
-	tsoh_iph->id = htons(st->p.ipv4_id);
-	st->p.ipv4_id++;
+	tsoh_iph->id = htons(st->ipv4_id);
+	st->ipv4_id++;
 
 	st->packet_space = skb_shinfo(skb)->gso_size;
 	++tx_queue->tso_packets;
 
 	/* Form a descriptor for this header. */
-	efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+	efx_tso_put_header(tx_queue, tsoh, st->header_len);
 
 	return 0;
 }
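The two ip_length branches above are also easy to check numerically. A sketch using the same invented header sizes as earlier (Ethernet 14, IPv4 20, TCP 20, gso_size 1460):

#include <stdio.h>

int main(void)
{
	unsigned eth_hdr_len = 14, header_len = 54, gso_size = 1460;
	unsigned full_packet_size = header_len + gso_size;

	/* Middle-of-burst segment: a full-sized IP datagram */
	unsigned ip_length_mid = full_packet_size - eth_hdr_len;

	/* Final segment with, say, 1176 payload bytes left (out_len) */
	unsigned out_len = 1176;
	unsigned ip_length_last = header_len - eth_hdr_len + out_len;

	printf("mid=%u last=%u\n", ip_length_mid, ip_length_last);
	/* -> mid=1500 last=1216 (40 bytes of IP+TCP headers plus 1176 payload) */
	return 0;
}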
@@ -1048,7 +1037,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	/* Assume that skb header area contains exactly the headers, and
 	 * all payload is in the frag list.
 	 */
-	if (skb_headlen(skb) == state.p.header_length) {
+	if (skb_headlen(skb) == state.header_len) {
 		/* Grab the first payload fragment. */
 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
 		frag_i = 0;
@@ -1072,7 +1061,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			goto stop;
 
 		/* Move onto the next fragment? */
-		if (state.ifc.len == 0) {
+		if (state.in_len == 0) {
 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
 				/* End of payload reached. */
 				break;
@@ -1108,13 +1097,13 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
-	if (state.ifc.unmap_len) {
-		if (state.ifc.unmap_single)
-			pci_unmap_single(efx->pci_dev, state.ifc.unmap_addr,
-					 state.ifc.unmap_len, PCI_DMA_TODEVICE);
+	if (state.unmap_len) {
+		if (state.unmap_single)
+			pci_unmap_single(efx->pci_dev, state.unmap_addr,
+					 state.unmap_len, PCI_DMA_TODEVICE);
 		else
-			pci_unmap_page(efx->pci_dev, state.ifc.unmap_addr,
-				       state.ifc.unmap_len, PCI_DMA_TODEVICE);
+			pci_unmap_page(efx->pci_dev, state.unmap_addr,
+				       state.unmap_len, PCI_DMA_TODEVICE);
 	}
 
 	efx_enqueue_unwind(tx_queue);
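The unwind path's only subtlety is that unmap_single, recorded when the mapping was created, selects the matching teardown call, and a zero unmap_len means ownership already passed to a TX buffer. A userspace mock of that branch (mock_unmap_single()/mock_unmap_page() are invented stand-ins for the PCI DMA API):

#include <stdbool.h>
#include <stdio.h>

static void mock_unmap_single(unsigned addr, unsigned len)
{
	printf("pci_unmap_single(%u, %u)\n", addr, len);
}

static void mock_unmap_page(unsigned addr, unsigned len)
{
	printf("pci_unmap_page(%u, %u)\n", addr, len);
}

int main(void)
{
	/* Invented state: a page-mapped fragment still owned by the cursor */
	struct { unsigned unmap_addr, unmap_len; bool unmap_single; } state =
		{ .unmap_addr = 4096, .unmap_len = 1024, .unmap_single = false };

	if (state.unmap_len) {
		if (state.unmap_single)
			mock_unmap_single(state.unmap_addr, state.unmap_len);
		else
			mock_unmap_page(state.unmap_addr, state.unmap_len);
	}
	return 0;
}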