about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorBen Hutchings <bhutchings@solarflare.com>2009-11-23 11:07:05 -0500
committerDavid S. Miller <davem@davemloft.net>2009-11-24 13:58:58 -0500
commit497f5ba3236425dbcf20b55452a013f1d3695ebb (patch)
tree987f46153a3f66014b2c66b8c1f2dcd8f8044467 /drivers/net
parent9bc183d7f90793e5e72f4dfd21a5877c6cd4da78 (diff)
sfc: Remove redundant efx_xmit() function
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/sfc/efx.h5
-rw-r--r--drivers/net/sfc/selftest.c2
-rw-r--r--drivers/net/sfc/tx.c26
3 files changed, 8 insertions, 25 deletions
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 6f4639465163..9d83322e8517 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -23,9 +23,8 @@
23#define EFX_MEM_BAR 2 23#define EFX_MEM_BAR 2
24 24
25/* TX */ 25/* TX */
26extern netdev_tx_t efx_xmit(struct efx_nic *efx, 26extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
27 struct efx_tx_queue *tx_queue, 27 struct sk_buff *skb);
28 struct sk_buff *skb);
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 28extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_stop_queue(struct efx_nic *efx); 29extern void efx_stop_queue(struct efx_nic *efx);
31extern void efx_wake_queue(struct efx_nic *efx); 30extern void efx_wake_queue(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 70eb5f135573..eab8c2e5d5e1 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -425,7 +425,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
425 425
426 if (efx_dev_registered(efx)) 426 if (efx_dev_registered(efx))
427 netif_tx_lock_bh(efx->net_dev); 427 netif_tx_lock_bh(efx->net_dev);
428 rc = efx_xmit(efx, tx_queue, skb); 428 rc = efx_enqueue_skb(tx_queue, skb);
429 if (efx_dev_registered(efx)) 429 if (efx_dev_registered(efx))
430 netif_tx_unlock_bh(efx->net_dev); 430 netif_tx_unlock_bh(efx->net_dev);
431 431
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e11632bd4138..a5e541dd8ce7 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -152,11 +152,13 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
152 * If any DMA mapping fails, any mapped fragments will be unmapped, 152 * If any DMA mapping fails, any mapped fragments will be unmapped,
153 * the queue's insert pointer will be restored to its original value. 153 * the queue's insert pointer will be restored to its original value.
154 * 154 *
155 * This function is split out from efx_hard_start_xmit to allow the
156 * loopback test to direct packets via specific TX queues.
157 *
155 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 158 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
156 * You must hold netif_tx_lock() to call this function. 159 * You must hold netif_tx_lock() to call this function.
157 */ 160 */
158static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, 161netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
159 struct sk_buff *skb)
160{ 162{
161 struct efx_nic *efx = tx_queue->efx; 163 struct efx_nic *efx = tx_queue->efx;
162 struct pci_dev *pci_dev = efx->pci_dev; 164 struct pci_dev *pci_dev = efx->pci_dev;
@@ -352,24 +354,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
352 } 354 }
353} 355}
354 356
355/* Initiate a packet transmission on the specified TX queue.
356 * Note that returning anything other than NETDEV_TX_OK will cause the
357 * OS to free the skb.
358 *
359 * This function is split out from efx_hard_start_xmit to allow the
360 * loopback test to direct packets via specific TX queues. It is
361 * therefore a non-static inline, so as not to penalise performance
362 * for non-loopback transmissions.
363 *
364 * Context: netif_tx_lock held
365 */
366inline netdev_tx_t efx_xmit(struct efx_nic *efx,
367 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
368{
369 /* Map fragments for DMA and add to TX queue */
370 return efx_enqueue_skb(tx_queue, skb);
371}
372
373/* Initiate a packet transmission. We use one channel per CPU 357/* Initiate a packet transmission. We use one channel per CPU
374 * (sharing when we have more CPUs than channels). On Falcon, the TX 358 * (sharing when we have more CPUs than channels). On Falcon, the TX
375 * completion events will be directed back to the CPU that transmitted 359 * completion events will be directed back to the CPU that transmitted
@@ -393,7 +377,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
393 else 377 else
394 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; 378 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
395 379
396 return efx_xmit(efx, tx_queue, skb); 380 return efx_enqueue_skb(tx_queue, skb);
397} 381}
398 382
399void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 383void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)