diff options
 -rw-r--r--  drivers/net/sfc/efx.h      |  5
 -rw-r--r--  drivers/net/sfc/selftest.c |  2
 -rw-r--r--  drivers/net/sfc/tx.c       | 26
3 files changed, 8 insertions(+), 25 deletions(-)
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 6f4639465163..9d83322e8517 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -23,9 +23,8 @@
| 23 | #define EFX_MEM_BAR 2 | 23 | #define EFX_MEM_BAR 2 |
| 24 | 24 | ||
| 25 | /* TX */ | 25 | /* TX */ |
| 26 | extern netdev_tx_t efx_xmit(struct efx_nic *efx, | 26 | extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, |
| 27 | struct efx_tx_queue *tx_queue, | 27 | struct sk_buff *skb); |
| 28 | struct sk_buff *skb); | ||
| 29 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | 28 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); |
| 30 | extern void efx_stop_queue(struct efx_nic *efx); | 29 | extern void efx_stop_queue(struct efx_nic *efx); |
| 31 | extern void efx_wake_queue(struct efx_nic *efx); | 30 | extern void efx_wake_queue(struct efx_nic *efx); |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 70eb5f135573..eab8c2e5d5e1 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -425,7 +425,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
| 425 | 425 | ||
| 426 | if (efx_dev_registered(efx)) | 426 | if (efx_dev_registered(efx)) |
| 427 | netif_tx_lock_bh(efx->net_dev); | 427 | netif_tx_lock_bh(efx->net_dev); |
| 428 | rc = efx_xmit(efx, tx_queue, skb); | 428 | rc = efx_enqueue_skb(tx_queue, skb); |
| 429 | if (efx_dev_registered(efx)) | 429 | if (efx_dev_registered(efx)) |
| 430 | netif_tx_unlock_bh(efx->net_dev); | 430 | netif_tx_unlock_bh(efx->net_dev); |
| 431 | 431 | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e11632bd4138..a5e541dd8ce7 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -152,11 +152,13 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
| 152 | * If any DMA mapping fails, any mapped fragments will be unmapped, | 152 | * If any DMA mapping fails, any mapped fragments will be unmapped, |
| 153 | * the queue's insert pointer will be restored to its original value. | 153 | * the queue's insert pointer will be restored to its original value. |
| 154 | * | 154 | * |
| 155 | * This function is split out from efx_hard_start_xmit to allow the | ||
| 156 | * loopback test to direct packets via specific TX queues. | ||
| 157 | * | ||
| 155 | * Returns NETDEV_TX_OK or NETDEV_TX_BUSY | 158 | * Returns NETDEV_TX_OK or NETDEV_TX_BUSY |
| 156 | * You must hold netif_tx_lock() to call this function. | 159 | * You must hold netif_tx_lock() to call this function. |
| 157 | */ | 160 | */ |
| 158 | static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, | 161 | netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) |
| 159 | struct sk_buff *skb) | ||
| 160 | { | 162 | { |
| 161 | struct efx_nic *efx = tx_queue->efx; | 163 | struct efx_nic *efx = tx_queue->efx; |
| 162 | struct pci_dev *pci_dev = efx->pci_dev; | 164 | struct pci_dev *pci_dev = efx->pci_dev; |
@@ -352,24 +354,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
| 352 | } | 354 | } |
| 353 | } | 355 | } |
| 354 | 356 | ||
| 355 | /* Initiate a packet transmission on the specified TX queue. | ||
| 356 | * Note that returning anything other than NETDEV_TX_OK will cause the | ||
| 357 | * OS to free the skb. | ||
| 358 | * | ||
| 359 | * This function is split out from efx_hard_start_xmit to allow the | ||
| 360 | * loopback test to direct packets via specific TX queues. It is | ||
| 361 | * therefore a non-static inline, so as not to penalise performance | ||
| 362 | * for non-loopback transmissions. | ||
| 363 | * | ||
| 364 | * Context: netif_tx_lock held | ||
| 365 | */ | ||
| 366 | inline netdev_tx_t efx_xmit(struct efx_nic *efx, | ||
| 367 | struct efx_tx_queue *tx_queue, struct sk_buff *skb) | ||
| 368 | { | ||
| 369 | /* Map fragments for DMA and add to TX queue */ | ||
| 370 | return efx_enqueue_skb(tx_queue, skb); | ||
| 371 | } | ||
| 372 | |||
| 373 | /* Initiate a packet transmission. We use one channel per CPU | 357 | /* Initiate a packet transmission. We use one channel per CPU |
| 374 | * (sharing when we have more CPUs than channels). On Falcon, the TX | 358 | * (sharing when we have more CPUs than channels). On Falcon, the TX |
| 375 | * completion events will be directed back to the CPU that transmitted | 359 | * completion events will be directed back to the CPU that transmitted |
@@ -393,7 +377,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
| 393 | else | 377 | else |
| 394 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; | 378 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; |
| 395 | 379 | ||
| 396 | return efx_xmit(efx, tx_queue, skb); | 380 | return efx_enqueue_skb(tx_queue, skb); |
| 397 | } | 381 | } |
| 398 | 382 | ||
| 399 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | 383 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) |
