aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNimrod Andy <B38611@freescale.com>2014-06-11 20:16:18 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-12 14:01:57 -0400
commit61a4427b955f79dfaa735788511ce558962c9d70 (patch)
treef2c33f6f6d52885fd69578b030ebbe9a041990f3
parent3993c4e159eba0e10c0628737736d6fcf97ab9ef (diff)
net: fec: Factorize the .xmit transmit function
Make the code more readable and easier to support other features like SG and TSO, by moving the common transmit logic into one API. The patch also factorizes the computation of the BD index into its own function. CC: David Laight <David.Laight@ACULAB.COM> Signed-off-by: Fugang Duan <B38611@freescale.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c81
2 files changed, 47 insertions, 35 deletions
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..db967a08637d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,7 @@ struct fec_enet_private {
296 /* The ring entries to be free()ed */ 296 /* The ring entries to be free()ed */
297 struct bufdesc *dirty_tx; 297 struct bufdesc *dirty_tx;
298 298
299 unsigned short bufdesc_size;
299 unsigned short tx_ring_size; 300 unsigned short tx_ring_size;
300 unsigned short rx_ring_size; 301 unsigned short rx_ring_size;
301 302
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 802be17285b6..ee8e5477c819 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -287,6 +287,12 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
287 return (new_bd < base) ? (new_bd + ring_size) : new_bd; 287 return (new_bd < base) ? (new_bd + ring_size) : new_bd;
288} 288}
289 289
290static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
291 struct fec_enet_private *fep)
292{
293 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
294}
295
290static void *swap_buffer(void *bufaddr, int len) 296static void *swap_buffer(void *bufaddr, int len)
291{ 297{
292 int i; 298 int i;
@@ -313,8 +319,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
313 return 0; 319 return 0;
314} 320}
315 321
316static netdev_tx_t 322static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
317fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
318{ 323{
319 struct fec_enet_private *fep = netdev_priv(ndev); 324 struct fec_enet_private *fep = netdev_priv(ndev);
320 const struct platform_device_id *id_entry = 325 const struct platform_device_id *id_entry =
@@ -329,14 +334,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
329 334
330 status = bdp->cbd_sc; 335 status = bdp->cbd_sc;
331 336
332 if (status & BD_ENET_TX_READY) {
333 /* Ooops. All transmit buffers are full. Bail out.
334 * This should not happen, since ndev->tbusy should be set.
335 */
336 netdev_err(ndev, "tx queue full!\n");
337 return NETDEV_TX_BUSY;
338 }
339
340 /* Protocol checksum off-load for TCP and UDP. */ 337 /* Protocol checksum off-load for TCP and UDP. */
341 if (fec_enet_clear_csum(skb, ndev)) { 338 if (fec_enet_clear_csum(skb, ndev)) {
342 dev_kfree_skb_any(skb); 339 dev_kfree_skb_any(skb);
@@ -350,16 +347,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
350 bufaddr = skb->data; 347 bufaddr = skb->data;
351 bdp->cbd_datlen = skb->len; 348 bdp->cbd_datlen = skb->len;
352 349
353 /* 350 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
354 * On some FEC implementations data must be aligned on
355 * 4-byte boundaries. Use bounce buffers to copy data
356 * and get it aligned. Ugh.
357 */
358 if (fep->bufdesc_ex)
359 index = (struct bufdesc_ex *)bdp -
360 (struct bufdesc_ex *)fep->tx_bd_base;
361 else
362 index = bdp - fep->tx_bd_base;
363 351
364 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 352 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
365 memcpy(fep->tx_bounce[index], skb->data, skb->len); 353 memcpy(fep->tx_bounce[index], skb->data, skb->len);
@@ -433,15 +421,43 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
433 421
434 fep->cur_tx = bdp; 422 fep->cur_tx = bdp;
435 423
436 if (fep->cur_tx == fep->dirty_tx)
437 netif_stop_queue(ndev);
438
439 /* Trigger transmission start */ 424 /* Trigger transmission start */
440 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 425 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
441 426
442 return NETDEV_TX_OK; 427 return NETDEV_TX_OK;
443} 428}
444 429
430static netdev_tx_t
431fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
432{
433 struct fec_enet_private *fep = netdev_priv(ndev);
434 struct bufdesc *bdp;
435 unsigned short status;
436 int ret;
437
438 /* Fill in a Tx ring entry */
439 bdp = fep->cur_tx;
440
441 status = bdp->cbd_sc;
442
443 if (status & BD_ENET_TX_READY) {
444 /* Ooops. All transmit buffers are full. Bail out.
445 * This should not happen, since ndev->tbusy should be set.
446 */
447 netdev_err(ndev, "tx queue full!\n");
448 return NETDEV_TX_BUSY;
449 }
450
451 ret = txq_submit_skb(skb, ndev);
452 if (ret == -EBUSY)
453 return NETDEV_TX_BUSY;
454
455 if (fep->cur_tx == fep->dirty_tx)
456 netif_stop_queue(ndev);
457
458 return NETDEV_TX_OK;
459}
460
445/* Init RX & TX buffer descriptors 461/* Init RX & TX buffer descriptors
446 */ 462 */
447static void fec_enet_bd_init(struct net_device *dev) 463static void fec_enet_bd_init(struct net_device *dev)
@@ -770,11 +786,7 @@ fec_enet_tx(struct net_device *ndev)
770 if (bdp == fep->cur_tx) 786 if (bdp == fep->cur_tx)
771 break; 787 break;
772 788
773 if (fep->bufdesc_ex) 789 index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
774 index = (struct bufdesc_ex *)bdp -
775 (struct bufdesc_ex *)fep->tx_bd_base;
776 else
777 index = bdp - fep->tx_bd_base;
778 790
779 skb = fep->tx_skbuff[index]; 791 skb = fep->tx_skbuff[index];
780 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, 792 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
@@ -921,11 +933,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
921 pkt_len = bdp->cbd_datlen; 933 pkt_len = bdp->cbd_datlen;
922 ndev->stats.rx_bytes += pkt_len; 934 ndev->stats.rx_bytes += pkt_len;
923 935
924 if (fep->bufdesc_ex) 936 index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
925 index = (struct bufdesc_ex *)bdp -
926 (struct bufdesc_ex *)fep->rx_bd_base;
927 else
928 index = bdp - fep->rx_bd_base;
929 data = fep->rx_skbuff[index]->data; 937 data = fep->rx_skbuff[index]->data;
930 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 938 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
931 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 939 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -2061,11 +2069,14 @@ static int fec_enet_init(struct net_device *ndev)
2061 2069
2062 /* Set receive and transmit descriptor base. */ 2070 /* Set receive and transmit descriptor base. */
2063 fep->rx_bd_base = cbd_base; 2071 fep->rx_bd_base = cbd_base;
2064 if (fep->bufdesc_ex) 2072 if (fep->bufdesc_ex) {
2065 fep->tx_bd_base = (struct bufdesc *) 2073 fep->tx_bd_base = (struct bufdesc *)
2066 (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); 2074 (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
2067 else 2075 fep->bufdesc_size = sizeof(struct bufdesc_ex);
2076 } else {
2068 fep->tx_bd_base = cbd_base + fep->rx_ring_size; 2077 fep->tx_bd_base = cbd_base + fep->rx_ring_size;
2078 fep->bufdesc_size = sizeof(struct bufdesc);
2079 }
2069 2080
2070 /* The FEC Ethernet specific entries in the device structure */ 2081 /* The FEC Ethernet specific entries in the device structure */
2071 ndev->watchdog_timeo = TX_TIMEOUT; 2082 ndev->watchdog_timeo = TX_TIMEOUT;