author     Edward Cree <ecree@solarflare.com>      2014-10-17 10:32:25 -0400
committer  David S. Miller <davem@davemloft.net>   2014-10-17 23:47:32 -0400
commit     70b33fb0ddec827cbbd14cdc664fc27b2ef4a6b6
tree       28167789dcd2eb00e6393a6732174a73669c60fe
parent     6cc69f2a404dea8641d6cf97c0fbe8d24579e259
sfc: add support for skb->xmit_more
When skb->xmit_more is set, don't ring the doorbell and don't do PIO. This
will also prevent TX Push, because there will be more than one buffer
waiting when the doorbell is rung.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/ethernet/sfc/nic.h   29
-rw-r--r--   drivers/net/ethernet/sfc/tx.c    43
2 files changed, 43 insertions(+), 29 deletions(-)
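As background, this is the general shape a driver's transmit path takes to honour xmit_more: post descriptors on every packet, but only write the doorbell when the stack has no more packets queued behind this one, or when the queue has been stopped. This is a minimal sketch, not code from this patch; the my_* helpers are hypothetical placeholders for the driver's own TX ring handling.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helpers standing in for the driver's real TX ring code. */
static void my_post_descriptors(struct sk_buff *skb)
{
        /* fill hardware descriptors for skb; no doorbell MMIO here */
}

static void my_ring_doorbell(void)
{
        /* MMIO write telling the NIC to fetch the queued descriptors */
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        my_post_descriptors(skb);

        /* Defer the doorbell while the stack has more packets queued behind
         * this one (skb->xmit_more), but never leave work unflushed on a
         * stopped queue, since no later transmit would ring it for us.
         */
        if (!skb->xmit_more || netif_xmit_stopped(txq))
                my_ring_doorbell();

        return NETDEV_TX_OK;
}

This is presumably also why the patch below moves efx_tx_maybe_stop_queue() ahead of the push: the queue-stopped state must be current before deciding whether the doorbell can safely be deferred.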
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 60f85149fc4c..f77cce034ad4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -71,9 +71,17 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
 }
 
-/* Report whether the NIC considers this TX queue empty, given the
- * write_count used for the last doorbell push. May return false
- * negative.
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+	else
+		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
  */
 static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
 					 unsigned int write_count)
@@ -86,9 +94,18 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
 	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
 }
 
-static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty. This also ensures that only one packet at a time can be
+ * using the PIO buffer.
+ */
+static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
 {
-	return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+	struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
+	return tx_queue->piobuf &&
+	       __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
+	       __efx_nic_tx_is_empty(partner, partner->insert_count);
 }
 
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
@@ -96,6 +113,8 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
  * descriptor to an empty queue, but is otherwise pointless. Further,
  * Falcon and Siena have hardware bugs (SF bug 33851) that may be
  * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
  */
 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
 					    unsigned int write_count)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 320609842211..ee84a90e371c 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -132,15 +132,6 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 	return max_descs;
 }
 
-/* Get partner of a TX queue, seen as part of the same net core queue */
-static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
-{
-	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
-		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
-	else
-		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
 	/* We need to consider both queues that the net core sees as one */
@@ -344,6 +335,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct efx_nic *efx = tx_queue->efx;
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
+	unsigned int old_insert_count = tx_queue->insert_count;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0;
 	dma_addr_t dma_addr, unmap_addr = 0;
@@ -351,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	unsigned short dma_flags;
 	int i = 0;
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
 
 	if (skb_shinfo(skb)->gso_size)
 		return efx_enqueue_skb_tso(tx_queue, skb);
@@ -369,9 +361,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Consider using PIO for short packets */
 #ifdef EFX_USE_PIO
-	if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
-	    efx_nic_tx_is_empty(tx_queue) &&
-	    efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+	if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
+	    efx_nic_may_tx_pio(tx_queue)) {
 		buffer = efx_enqueue_skb_pio(tx_queue, skb);
 		dma_flags = EFX_TX_BUF_OPTION;
 		goto finish_packet;
@@ -439,13 +430,14 @@ finish_packet:
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	/* Pass off to hardware */
-	efx_nic_push_buffers(tx_queue);
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+		efx_nic_push_buffers(tx_queue);
 
 	tx_queue->tx_packets++;
 
-	efx_tx_maybe_stop_queue(tx_queue);
-
 	return NETDEV_TX_OK;
 
  dma_err:
@@ -458,7 +450,7 @@ finish_packet:
 	dev_kfree_skb_any(skb);
 
 	/* Work backwards until we hit the original insert pointer value */
-	while (tx_queue->insert_count != tx_queue->write_count) {
+	while (tx_queue->insert_count != old_insert_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
@@ -989,12 +981,13 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
 /* Remove buffers put into a tx_queue. None of the buffers must have
  * an skb attached.
  */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+			       unsigned int insert_count)
 {
 	struct efx_tx_buffer *buffer;
 
 	/* Work backwards until we hit the original insert pointer value */
-	while (tx_queue->insert_count != tx_queue->write_count) {
+	while (tx_queue->insert_count != insert_count) {
 		--tx_queue->insert_count;
 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
@@ -1258,13 +1251,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 				struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
+	unsigned int old_insert_count = tx_queue->insert_count;
 	int frag_i, rc;
 	struct tso_state state;
 
 	/* Find the packet protocol and sanity-check it */
 	state.protocol = efx_tso_check_protocol(skb);
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
 
 	rc = tso_start(&state, efx, skb);
 	if (rc)
@@ -1308,11 +1302,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
-	/* Pass off to hardware */
-	efx_nic_push_buffers(tx_queue);
-
 	efx_tx_maybe_stop_queue(tx_queue);
 
+	/* Pass off to hardware */
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+		efx_nic_push_buffers(tx_queue);
+
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 
@@ -1336,6 +1331,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
 			 state.header_unmap_len, DMA_TO_DEVICE);
 
-	efx_enqueue_unwind(tx_queue);
+	efx_enqueue_unwind(tx_queue, old_insert_count);
 	return NETDEV_TX_OK;
 }