author     Ben Hutchings <bhutchings@solarflare.com>   2008-09-01 07:46:40 -0400
committer  Jeff Garzik <jgarzik@redhat.com>            2008-09-03 09:53:44 -0400
commit     ecbd95c17c221913cc3c5776051b2fa8b3b97316 (patch)
tree       1ec0daca41be682886639a8dc3627edbe7571523 /drivers/net/sfc
parent     5988b63a53e120a9db4439d4512f4c1b17e7170e (diff)
sfc: Use pci_map_single() to map the skb header when doing TSO
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
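
In short: the TSO path previously DMA-mapped the payload part of the skb
linear area by converting skb->data to a page with virt_to_page() and
computing an offset for pci_map_page(). This patch maps that region
directly with pci_map_single() instead, and records an unmap_single flag
in struct tso_state and struct efx_tx_buffer so that every teardown path
issues the matching pci_unmap_single()/pci_unmap_page() call. As part of
the rework, efx_tx_queue_insert() stops taking skb/unmap parameters and
instead returns the final buffer, to which the caller transfers skb and
mapping ownership.

For orientation, a minimal sketch of the mapping change (the
map_tso_head_*() wrappers are hypothetical, not part of the patch; the
real code is in tso_get_fragment()/tso_get_head_fragment() in the diff
below):

#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical wrappers for illustration only.  'hl' is the length of
 * the TSO headers at the start of skb->data. */

/* Before: treat the post-header linear data as page + offset.  This
 * relied on skb->data living in the kernel direct mapping, so that
 * virt_to_page() is valid. */
static dma_addr_t map_tso_head_old(struct pci_dev *pdev,
				   const struct sk_buff *skb, int hl)
{
	int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);

	return pci_map_page(pdev, virt_to_page(skb->data), page_off + hl,
			    skb_headlen(skb) - hl, PCI_DMA_TODEVICE);
}

/* After: map the same bytes directly by virtual address.  The unmap
 * call must now be pci_unmap_single(), not pci_unmap_page(), which is
 * what the new unmap_single flag tracks. */
static dma_addr_t map_tso_head_new(struct pci_dev *pdev,
				   const struct sk_buff *skb, int hl)
{
	return pci_map_single(pdev, skb->data + hl,
			      skb_headlen(skb) - hl, PCI_DMA_TODEVICE);
}

The trade-off: pci_map_single() avoids the virt_to_page()/offset
arithmetic, but map and unmap calls must now be paired by kind, hence
the flag threaded through both structures in the diff below.
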
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/tx.c  152
1 file changed, 83 insertions(+), 69 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 0452ea6937ab..11127757c05d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -287,9 +287,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 	}
 
 	/* Free the fragment we were mid-way through pushing */
-	if (unmap_len)
-		pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-			       PCI_DMA_TODEVICE);
+	if (unmap_len) {
+		if (unmap_single)
+			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
+				       PCI_DMA_TODEVICE);
+	}
 
 	return rc;
 }
@@ -561,8 +566,7 @@ struct tso_state {
 		/* DMA address and length of the whole fragment */
 		unsigned int unmap_len;
 		dma_addr_t unmap_addr;
-		struct page *page;
-		unsigned page_off;
+		unsigned int unmap_single;
 	} ifc;
 
 	struct {
@@ -686,18 +690,14 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
  * @tx_queue: Efx TX queue
  * @dma_addr: DMA address of fragment
  * @len: Length of fragment
- * @skb: Only non-null for end of last segment
- * @end_of_packet: True if last fragment in a packet
- * @unmap_addr: DMA address of fragment for unmapping
- * @unmap_len: Only set this in last segment of a fragment
+ * @final_buffer: The final buffer inserted into the queue
  *
  * Push descriptors onto the TX queue.  Return 0 on success or 1 if
  * @tx_queue full.
  */
 static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			       dma_addr_t dma_addr, unsigned len,
-			       const struct sk_buff *skb, int end_of_packet,
-			       dma_addr_t unmap_addr, unsigned unmap_len)
+			       struct efx_tx_buffer **final_buffer)
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
@@ -725,8 +725,10 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->type->txd_ring_mask - 1 - fill_level;
-			if (unlikely(q_space-- <= 0))
+			if (unlikely(q_space-- <= 0)) {
+				*final_buffer = NULL;
 				return 1;
+			}
 			smp_mb();
 			--tx_queue->stopped;
 		}
@@ -766,10 +768,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 
 	EFX_BUG_ON_PARANOID(!len);
 	buffer->len = len;
-	buffer->skb = skb;
-	buffer->continuation = !end_of_packet;
-	buffer->unmap_addr = unmap_addr;
-	buffer->unmap_len = unmap_len;
+	*final_buffer = buffer;
 	return 0;
 }
 
@@ -817,9 +816,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 		buffer->len = 0;
 		buffer->continuation = 1;
 		if (buffer->unmap_len) {
-			pci_unmap_page(tx_queue->efx->pci_dev,
-				       buffer->unmap_addr,
-				       buffer->unmap_len, PCI_DMA_TODEVICE);
+			if (buffer->unmap_single)
+				pci_unmap_single(tx_queue->efx->pci_dev,
+						 buffer->unmap_addr,
+						 buffer->unmap_len,
+						 PCI_DMA_TODEVICE);
+			else
+				pci_unmap_page(tx_queue->efx->pci_dev,
+					       buffer->unmap_addr,
+					       buffer->unmap_len,
+					       PCI_DMA_TODEVICE);
 			buffer->unmap_len = 0;
 		}
 	}
@@ -846,31 +852,40 @@ static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
 	st->packet_space = st->p.full_packet_size;
 	st->remaining_len = skb->len - st->p.header_length;
+	st->ifc.unmap_len = 0;
+	st->ifc.unmap_single = 0;
 }
 
-
-/**
- * tso_get_fragment - record fragment details and map for DMA
- * @st: TSO state
- * @efx: Efx NIC
- * @data: Pointer to fragment data
- * @len: Length of fragment
- *
- * Record fragment details and map for DMA.  Return 0 on success, or
- * -%ENOMEM if DMA mapping fails.
- */
 static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
-				   int len, struct page *page, int page_off)
+				   skb_frag_t *frag)
 {
+	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, frag->page,
+					  frag->page_offset, frag->size,
+					  PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
+		st->ifc.unmap_single = 0;
+		st->ifc.unmap_len = frag->size;
+		st->ifc.len = frag->size;
+		st->ifc.dma_addr = st->ifc.unmap_addr;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+static inline int
+tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+		      const struct sk_buff *skb)
+{
+	int hl = st->p.header_length;
+	int len = skb_headlen(skb) - hl;
 
-	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
-					  len, PCI_DMA_TODEVICE);
+	st->ifc.unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
+					    len, PCI_DMA_TODEVICE);
 	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
+		st->ifc.unmap_single = 1;
 		st->ifc.unmap_len = len;
 		st->ifc.len = len;
 		st->ifc.dma_addr = st->ifc.unmap_addr;
-		st->ifc.page = page;
-		st->ifc.page_off = page_off;
 		return 0;
 	}
 	return -ENOMEM;
@@ -891,7 +906,7 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 						const struct sk_buff *skb,
 						struct tso_state *st)
 {
-
+	struct efx_tx_buffer *buffer;
 	int n, end_of_packet, rc;
 
 	if (st->ifc.len == 0)
@@ -907,16 +922,25 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	st->packet_space -= n;
 	st->remaining_len -= n;
 	st->ifc.len -= n;
-	st->ifc.page_off += n;
-	end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
 
-	rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
-				 st->remaining_len ? NULL : skb,
-				 end_of_packet, st->ifc.unmap_addr,
-				 st->ifc.len ? 0 : st->ifc.unmap_len);
+	rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, &buffer);
+	if (likely(rc == 0)) {
+		if (st->remaining_len == 0)
+			/* Transfer ownership of the skb */
+			buffer->skb = skb;
 
-	st->ifc.dma_addr += n;
+		end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+		buffer->continuation = !end_of_packet;
 
+		if (st->ifc.len == 0) {
+			/* Transfer ownership of the pci mapping */
+			buffer->unmap_len = st->ifc.unmap_len;
+			buffer->unmap_single = st->ifc.unmap_single;
+			st->ifc.unmap_len = 0;
+		}
+	}
+
+	st->ifc.dma_addr += n;
 	return rc;
 }
 
@@ -1008,9 +1032,9 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       const struct sk_buff *skb)
 {
+	struct efx_nic *efx = tx_queue->efx;
 	int frag_i, rc, rc2 = NETDEV_TX_OK;
 	struct tso_state state;
-	skb_frag_t *f;
 
 	/* Verify TSO is safe - these checks should never fail. */
 	efx_tso_check_safe(skb);
@@ -1026,25 +1050,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 		/* Grab the first payload fragment. */
 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
 		frag_i = 0;
-		f = &skb_shinfo(skb)->frags[frag_i];
-		rc = tso_get_fragment(&state, tx_queue->efx,
-				      f->size, f->page, f->page_offset);
+		rc = tso_get_fragment(&state, efx,
+				      skb_shinfo(skb)->frags + frag_i);
 		if (rc)
 			goto mem_err;
 	} else {
-		/* It may look like this code fragment assumes that the
-		 * skb->data portion does not cross a page boundary, but
-		 * that is not the case.  It is guaranteed to be direct
-		 * mapped memory, and therefore is physically contiguous,
-		 * and so DMA will work fine.  kmap_atomic() on this region
-		 * will just return the direct mapping, so that will work
-		 * too.
-		 */
-		int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
-		int hl = state.p.header_length;
-		rc = tso_get_fragment(&state, tx_queue->efx,
-				      skb_headlen(skb) - hl,
-				      virt_to_page(skb->data), page_off + hl);
+		rc = tso_get_head_fragment(&state, efx, skb);
 		if (rc)
 			goto mem_err;
 		frag_i = -1;
@@ -1063,9 +1074,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 		if (++frag_i >= skb_shinfo(skb)->nr_frags)
 			/* End of payload reached. */
 			break;
-		f = &skb_shinfo(skb)->frags[frag_i];
-		rc = tso_get_fragment(&state, tx_queue->efx,
-				      f->size, f->page, f->page_offset);
+		rc = tso_get_fragment(&state, efx,
+				      skb_shinfo(skb)->frags + frag_i);
 		if (rc)
 			goto mem_err;
 	}
@@ -1083,8 +1093,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	return NETDEV_TX_OK;
 
  mem_err:
-	EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
-		" error\n");
+	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any((struct sk_buff *)skb);
 	goto unwind;
 
@@ -1093,13 +1102,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	/* Stop the queue if it wasn't stopped before. */
 	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->efx);
+		efx_stop_queue(efx);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
-	if (state.ifc.unmap_len)
-		pci_unmap_page(tx_queue->efx->pci_dev, state.ifc.unmap_addr,
-			       state.ifc.unmap_len, PCI_DMA_TODEVICE);
+	if (state.ifc.unmap_len) {
+		if (state.ifc.unmap_single)
+			pci_unmap_single(efx->pci_dev, state.ifc.unmap_addr,
+					 state.ifc.unmap_len, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(efx->pci_dev, state.ifc.unmap_addr,
+				       state.ifc.unmap_len, PCI_DMA_TODEVICE);
+	}
 
 	efx_enqueue_unwind(tx_queue);
 	return rc2;
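
The unmap_single dispatch above is open-coded at three sites (the
efx_enqueue_skb() error path, efx_enqueue_unwind(), and the TSO unwind
path at the end of efx_enqueue_skb_tso()). A hypothetical helper showing
the shared pattern, assuming only the unmap_addr/unmap_len/unmap_single
triple this patch introduces (the patch itself keeps the sites
open-coded):

/* Hypothetical consolidation, not part of this patch: release a TX DMA
 * mapping with the unmap call that matches how it was mapped. */
static void efx_unmap_tx_dma(struct pci_dev *pdev, dma_addr_t unmap_addr,
			     unsigned int unmap_len,
			     unsigned int unmap_single)
{
	if (!unmap_len)
		return;

	if (unmap_single)
		pci_unmap_single(pdev, unmap_addr, unmap_len,
				 PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, unmap_addr, unmap_len,
			       PCI_DMA_TODEVICE);
}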