path: root/drivers/net/sfc/tx.c
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r--	drivers/net/sfc/tx.c	26
1 files changed, 5 insertions, 21 deletions
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e11632bd4138..a5e541dd8ce7 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -152,11 +152,13 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
  * If any DMA mapping fails, any mapped fragments will be unmapped,
  * the queue's insert pointer will be restored to its original value.
  *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
  * You must hold netif_tx_lock() to call this function.
  */
-static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
-				   struct sk_buff *skb)
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct pci_dev *pci_dev = efx->pci_dev;
@@ -352,24 +354,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 	}
 }
 
-/* Initiate a packet transmission on the specified TX queue.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
- *
- * This function is split out from efx_hard_start_xmit to allow the
- * loopback test to direct packets via specific TX queues. It is
- * therefore a non-static inline, so as not to penalise performance
- * for non-loopback transmissions.
- *
- * Context: netif_tx_lock held
- */
-inline netdev_tx_t efx_xmit(struct efx_nic *efx,
-			    struct efx_tx_queue *tx_queue, struct sk_buff *skb)
-{
-	/* Map fragments for DMA and add to TX queue */
-	return efx_enqueue_skb(tx_queue, skb);
-}
-
 /* Initiate a packet transmission. We use one channel per CPU
  * (sharing when we have more CPUs than channels). On Falcon, the TX
  * completion events will be directed back to the CPU that transmitted
@@ -393,7 +377,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	else
 		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
 
-	return efx_xmit(efx, tx_queue, skb);
+	return efx_enqueue_skb(tx_queue, skb);
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
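For readers outside the kernel tree, here is a minimal standalone C sketch of the call flow after this patch: efx_hard_start_xmit() picks a TX queue and calls efx_enqueue_skb() directly, which is all the removed efx_xmit() wrapper ever did. This is not driver code; the struct layouts, the needs_csum_offload field and the queue setup below are simplified stand-ins for illustration only.

/* Standalone model of the post-patch transmit path; real driver types,
 * DMA mapping and locking are deliberately omitted. */
#include <stdio.h>
#include <stdbool.h>

enum { EFX_TX_QUEUE_OFFLOAD_CSUM, EFX_TX_QUEUE_NO_CSUM, EFX_TX_QUEUE_COUNT };

struct efx_tx_queue { int index; };
struct efx_nic { struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT]; };
struct sk_buff { bool needs_csum_offload; };	/* stand-in for skb state */

/* Stand-in for efx_enqueue_skb(): in the driver this maps fragments for
 * DMA and pushes descriptors; after the patch it is called directly by
 * both the fast path and the loopback self-test. */
static int efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	(void)skb;
	printf("enqueue on TX queue %d\n", tx_queue->index);
	return 0;	/* NETDEV_TX_OK */
}

/* Fast path after the patch: choose a queue by checksum-offload need,
 * then enqueue directly, with no intermediate efx_xmit() hop. */
static int efx_hard_start_xmit(struct efx_nic *efx, struct sk_buff *skb)
{
	struct efx_tx_queue *tx_queue;

	if (skb->needs_csum_offload)
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

	return efx_enqueue_skb(tx_queue, skb);
}

int main(void)
{
	struct efx_nic efx = {
		.tx_queue = { { .index = 0 }, { .index = 1 } },
	};
	struct sk_buff skb = { .needs_csum_offload = true };

	return efx_hard_start_xmit(&efx, &skb);
}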