author     Ben Hutchings <bhutchings@solarflare.com>  2012-05-17 15:52:20 -0400
committer  Ben Hutchings <bhutchings@solarflare.com>  2012-08-24 14:00:26 -0400
commit     7668ff9c2ad7d354655e23afa836a92d54d2ea63 (patch)
tree       01eaf41af56d406f1783f2bc8f7f70812759a130
parent     8f4cccbbd92f2ad0ddbbc498ef7cee2a1c3defe9 (diff)
sfc: Refactor struct efx_tx_buffer to use a flags field
Add a flags field to struct efx_tx_buffer, replacing the continuation
and map_single booleans.

Since a single descriptor cannot be both a TSO header and the last
descriptor for an skb, unionise efx_tx_buffer::{skb,tsoh} and add
flags for validity of these fields.

Clear all flags in free buffers (whereas previously the continuation
flag would be set).

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
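[Editor's note] The change follows a common kernel pattern: several mutually
exclusive booleans and pointers collapse into one bit-flags word, and the
flags also record which member of the new union is valid. A minimal
standalone C sketch of the pattern (illustration only, not the driver
source; the flag values mirror the #defines introduced in net_driver.h
below):

#include <assert.h>
#include <stdio.h>

#define EFX_TX_BUF_CONT        1  /* not last descriptor of packet */
#define EFX_TX_BUF_SKB         2  /* buffer is last part of skb */
#define EFX_TX_BUF_TSOH        4  /* buffer is TSO header */
#define EFX_TX_BUF_MAP_SINGLE  8  /* mapped with dma_map_single() */

struct tx_buffer {
        union {                    /* valid member is chosen by flags */
                const void *skb;   /* valid when flags & EFX_TX_BUF_SKB */
                void *tsoh;        /* valid when flags & EFX_TX_BUF_TSOH */
        };
        unsigned short flags;
};

int main(void)
{
        struct tx_buffer buf = { .flags = 0 };  /* free: all flags clear */

        /* final descriptor of an skb whose head was mapped with map_single */
        buf.flags = EFX_TX_BUF_SKB | EFX_TX_BUF_MAP_SINGLE;

        /* SKB and TSOH can never both be set, which is what makes the
         * union safe in place of two separate pointers */
        assert(!((buf.flags & EFX_TX_BUF_SKB) && (buf.flags & EFX_TX_BUF_TSOH)));
        printf("last part of skb: %d\n", !!(buf.flags & EFX_TX_BUF_SKB));
        return 0;
}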
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  27
-rw-r--r--  drivers/net/ethernet/sfc/nic.c           4
-rw-r--r--  drivers/net/ethernet/sfc/tx.c           78
3 files changed, 53 insertions(+), 56 deletions(-)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a989692..0ac01fa6e63c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,29 +91,30 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *      Set only on the final fragment of a packet; %NULL for all other
- *      fragments. When this fragment completes, then we can free this
- *      skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *      buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *      freed when descriptor completes
+ * @tsoh: When @flags & %EFX_TX_BUF_TSOH, the associated TSO header structure.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
  *      This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
-       const struct sk_buff *skb;
-       struct efx_tso_header *tsoh;
+       union {
+               const struct sk_buff *skb;
+               struct efx_tso_header *tsoh;
+       };
        dma_addr_t dma_addr;
+       unsigned short flags;
        unsigned short len;
-       bool continuation;
-       bool unmap_single;
        unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT        1       /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
+#define EFX_TX_BUF_TSOH        4       /* buffer is TSO header */
+#define EFX_TX_BUF_MAP_SINGLE  8       /* buffer was mapped with dma_map_single() */
 
 /**
  * struct efx_tx_queue - An Efx TX queue
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799762d6..aa113709831d 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
                ++tx_queue->write_count;
 
                /* Create TX descriptor ring entry */
+               BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                EFX_POPULATE_QWORD_4(*txd,
-                                    FSF_AZ_TX_KER_CONT, buffer->continuation,
+                                    FSF_AZ_TX_KER_CONT,
+                                    buffer->flags & EFX_TX_BUF_CONT,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
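[Editor's note] The new BUILD_BUG_ON() is load-bearing here:
buffer->flags & EFX_TX_BUF_CONT is written straight into
FSF_AZ_TX_KER_CONT, a one-bit field in the hardware descriptor, so the
masked result must be exactly 0 or 1. If the flag value ever changed, the
build breaks instead of descriptors getting corrupted. A standalone sketch
of the invariant (with a classic negative-array stand-in for the kernel's
BUILD_BUG_ON, since this sketch lives outside the kernel tree):

/* Compile-time assertion: a negative array size is rejected by the
 * compiler, so this fails to build whenever cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define EFX_TX_BUF_CONT 1

static unsigned int ker_cont_bit(unsigned short flags)
{
        BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
        /* With EFX_TX_BUF_CONT == 1 this is exactly 0 or 1; if the flag
         * moved to, say, bit 2, the result could be 4 and would no longer
         * fit the one-bit descriptor field. */
        return flags & EFX_TX_BUF_CONT;
}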
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 18713436b443..24c82f3ce0f3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -39,25 +39,25 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
-               if (buffer->unmap_single)
+               if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
-               buffer->unmap_single = false;
        }
 
-       if (buffer->skb) {
+       if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-               buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }
+
+       buffer->flags &= EFX_TX_BUF_TSOH;
 }
 
 /**
@@ -89,14 +89,14 @@ static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
 static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
                           struct efx_tx_buffer *buffer)
 {
-       if (buffer->tsoh) {
+       if (buffer->flags & EFX_TX_BUF_TSOH) {
                if (likely(!buffer->tsoh->unmap_len)) {
                        buffer->tsoh->next = tx_queue->tso_headers_free;
                        tx_queue->tso_headers_free = buffer->tsoh;
                } else {
                        efx_tsoh_heap_free(tx_queue, buffer->tsoh);
                }
-               buffer->tsoh = NULL;
+               buffer->flags &= ~EFX_TX_BUF_TSOH;
        }
 }
 
@@ -163,7 +163,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        unsigned int len, unmap_len = 0, fill_level, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
-       bool unmap_single;
+       unsigned short dma_flags;
        int q_space, i = 0;
        netdev_tx_t rc = NETDEV_TX_OK;
 
@@ -190,7 +190,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
         * since this is more efficient on machines with sparse
         * memory.
         */
-       unmap_single = true;
+       dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
        /* Process all fragments */
@@ -234,10 +234,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_tsoh_free(tx_queue, buffer);
-               EFX_BUG_ON_PARANOID(buffer->tsoh);
-               EFX_BUG_ON_PARANOID(buffer->skb);
+               EFX_BUG_ON_PARANOID(buffer->flags);
                EFX_BUG_ON_PARANOID(buffer->len);
-               EFX_BUG_ON_PARANOID(!buffer->continuation);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
                dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +245,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                /* Fill out per descriptor fields */
                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
+               buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);
 
        /* Transfer ownership of the unmapping to the final buffer */
-       buffer->unmap_single = unmap_single;
+       buffer->flags = EFX_TX_BUF_CONT | dma_flags;
        buffer->unmap_len = unmap_len;
        unmap_len = 0;
 
@@ -264,14 +263,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
-               unmap_single = false;
+               dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }
 
        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
-       buffer->continuation = false;
+       buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
@@ -302,7 +301,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
-               if (unmap_single)
+               if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
@@ -340,7 +339,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                }
 
                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-               buffer->continuation = true;
                buffer->len = 0;
 
                ++tx_queue->read_count;
@@ -484,7 +482,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
-       int i, rc;
+       int rc;
 
        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,8 +498,6 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
-       for (i = 0; i <= tx_queue->ptr_mask; ++i)
-               tx_queue->buffer[i].continuation = true;
 
        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
@@ -546,7 +542,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->continuation = true;
                buffer->len = 0;
 
                ++tx_queue->read_count;
@@ -631,7 +626,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @in_len: Remaining length in current SKB fragment
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
  * @protocol: Network protocol (after any VLAN header)
  * @header_len: Number of bytes of header
  * @full_packet_size: Number of bytes to put in each outgoing segment
@@ -651,7 +646,7 @@ struct tso_state {
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
-       bool unmap_single;
+       unsigned short dma_flags;
 
        __be16 protocol;
        unsigned header_len;
@@ -833,9 +828,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               EFX_BUG_ON_PARANOID(!buffer->continuation);
-               EFX_BUG_ON_PARANOID(buffer->tsoh);
+               EFX_BUG_ON_PARANOID(buffer->flags);
 
                buffer->dma_addr = dma_addr;
 
@@ -845,7 +838,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                if (dma_len >= len)
                        break;
 
-               buffer->len = dma_len; /* Don't set the other members */
+               buffer->len = dma_len;
+               buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }
@@ -873,12 +867,11 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
        efx_tsoh_free(tx_queue, buffer);
        EFX_BUG_ON_PARANOID(buffer->len);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
-       EFX_BUG_ON_PARANOID(buffer->skb);
-       EFX_BUG_ON_PARANOID(!buffer->continuation);
-       EFX_BUG_ON_PARANOID(buffer->tsoh);
+       EFX_BUG_ON_PARANOID(buffer->flags);
        buffer->len = len;
        buffer->dma_addr = tsoh->dma_addr;
        buffer->tsoh = tsoh;
+       buffer->flags = EFX_TX_BUF_TSOH | EFX_TX_BUF_CONT;
 
        ++tx_queue->insert_count;
 }
@@ -896,11 +889,11 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                efx_tsoh_free(tx_queue, buffer);
-               EFX_BUG_ON_PARANOID(buffer->skb);
+               EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_SKB);
                if (buffer->unmap_len) {
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
-                       if (buffer->unmap_single)
+                       if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                                dma_unmap_single(&tx_queue->efx->pci_dev->dev,
                                                 unmap_addr, buffer->unmap_len,
                                                 DMA_TO_DEVICE);
@@ -911,7 +904,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
                        buffer->unmap_len = 0;
                }
                buffer->len = 0;
-               buffer->continuation = true;
+               buffer->flags = 0;
        }
 }
 
@@ -938,7 +931,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
-       st->unmap_single = false;
+       st->dma_flags = 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +940,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = false;
+               st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
@@ -965,7 +958,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
                                        len, DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = true;
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
@@ -990,7 +983,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                           struct tso_state *st)
 {
        struct efx_tx_buffer *buffer;
-       int n, end_of_packet, rc;
+       int n, rc;
 
        if (st->in_len == 0)
                return 0;
@@ -1008,17 +1001,18 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 
        rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
        if (likely(rc == 0)) {
-               if (st->out_len == 0)
+               if (st->out_len == 0) {
                        /* Transfer ownership of the skb */
                        buffer->skb = skb;
-
-               end_of_packet = st->out_len == 0 || st->packet_space == 0;
-               buffer->continuation = !end_of_packet;
+                       buffer->flags = EFX_TX_BUF_SKB;
+               } else if (st->packet_space != 0) {
+                       buffer->flags = EFX_TX_BUF_CONT;
+               }
 
                if (st->in_len == 0) {
                        /* Transfer ownership of the DMA mapping */
                        buffer->unmap_len = st->unmap_len;
-                       buffer->unmap_single = st->unmap_single;
+                       buffer->flags |= st->dma_flags;
                        st->unmap_len = 0;
                }
        }
@@ -1195,7 +1189,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
-               if (state.unmap_single)
+               if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
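
[Editor's note] After this patch, ownership on the completion path is read
entirely from the flags word: EFX_TX_BUF_MAP_SINGLE selects the unmap call,
EFX_TX_BUF_SKB says the skb can be freed, and only the TSOH bit survives
into the freed state so efx_tsoh_free() can still recycle the header. A
hedged standalone sketch of that dequeue logic (stand-in types, printf
placeholders for the DMA and skb calls; not the driver source):

#include <stdio.h>

#define EFX_TX_BUF_SKB         2
#define EFX_TX_BUF_TSOH        4
#define EFX_TX_BUF_MAP_SINGLE  8

struct tx_buffer {
        unsigned short flags;
        unsigned short unmap_len;
};

static void dequeue_buffer(struct tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        printf("dma_unmap_single()\n"); /* linear head */
                else
                        printf("dma_unmap_page()\n");   /* page fragment */
                buffer->unmap_len = 0;
        }
        if (buffer->flags & EFX_TX_BUF_SKB)
                printf("free skb, report completion\n");

        /* Keep only the TSOH bit so the TSO header can be recycled later;
         * everything else is cleared, i.e. a free buffer now has no CONT
         * flag where it previously had continuation == true. */
        buffer->flags &= EFX_TX_BUF_TSOH;
}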