-rw-r--r--	drivers/net/ethernet/freescale/fec.h      |   3
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c | 120
2 files changed, 77 insertions, 46 deletions
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
+	unsigned short tx_ring_size;
+	unsigned short rx_ring_size;
+
 	struct platform_device *pdev;
 
 	int	opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c610a2716be4..fd90bf561d83 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -239,22 +239,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex + 1);
+	struct bufdesc *new_bd = bdp + 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+			ex_base : ex_new_bd);
 	else
-		return bdp + 1;
+		return (new_bd >= (base + ring_size)) ?
+			base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex - 1);
+	struct bufdesc *new_bd = bdp - 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd < ex_base) ?
+			(ex_new_bd + ring_size) : ex_new_bd);
 	else
-		return bdp - 1;
+		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
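Note: the two helpers above now take fep instead of an is_ex flag, so they can tell TX descriptors from RX ones (by comparing against fep->tx_bd_base) and wrap on the per-ring size rather than on a wrap bit stored in the descriptor. What follows is a minimal standalone sketch of that wrap arithmetic, not kernel code: next_desc()/prev_desc() and the one-field struct bufdesc are stand-ins invented for illustration.

/*
 * Standalone model of the wrap logic in fec_enet_get_nextdesc() and
 * fec_enet_get_prevdesc(): step through a ring of ring_size entries
 * and wrap at either end by pointer comparison.
 */
#include <stdio.h>

struct bufdesc { unsigned short cbd_sc; };	/* stand-in descriptor */

static struct bufdesc *next_desc(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *new_bd = bdp + 1;

	return (new_bd >= base + ring_size) ? base : new_bd;
}

static struct bufdesc *prev_desc(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *new_bd = bdp - 1;

	return (new_bd < base) ? new_bd + ring_size : new_bd;
}

int main(void)
{
	struct bufdesc ring[4];
	struct bufdesc *bdp;

	bdp = next_desc(&ring[3], ring, 4);	/* last entry wraps to ring[0] */
	printf("next of 3 -> %ld\n", (long)(bdp - ring));

	bdp = prev_desc(&ring[0], ring, 4);	/* first entry wraps to ring[3] */
	printf("prev of 0 -> %ld\n", (long)(bdp - ring));
	return 0;
}

Because the TX ring sits immediately after the RX ring in the same allocation (see the fec_enet_init() hunk below), any bdp at or above tx_bd_base must belong to the TX ring, which is what the bdp >= fep->tx_bd_base test in the real helpers relies on.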
@@ -380,7 +415,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
 		fep->delay_work.trig_tx = true;
@@ -389,10 +424,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	if (status & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	fep->cur_tx = bdp;
 
@@ -417,18 +449,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	fep->cur_rx = fep->rx_bd_base;
@@ -436,7 +468,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -445,11 +477,11 @@ static void fec_enet_bd_init(struct net_device *dev)
 			fep->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 	fep->dirty_tx = bdp;
 }
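Note: in the init loops above the helper wraps bdp back to the ring base on the final iteration, so the trailing fec_enet_get_prevdesc() call lands on the last real descriptor, which is the one that needs BD_SC_WRAP for the hardware. A small standalone sketch of that pattern, with a simplified descriptor and a made-up SC_WRAP value rather than the kernel definitions:

#include <stdio.h>

#define SC_WRAP 0x2000			/* stand-in for BD_SC_WRAP */

struct bufdesc { unsigned short cbd_sc; };

int main(void)
{
	struct bufdesc ring[4];
	struct bufdesc *bdp = ring;
	int ring_size = 4, i;

	/* Clear every descriptor, advancing with wrap like the driver does. */
	for (i = 0; i < ring_size; i++) {
		bdp->cbd_sc = 0;
		bdp = (bdp + 1 >= ring + ring_size) ? ring : bdp + 1;
	}

	/* bdp has wrapped to ring[0]; one step back is the last entry. */
	bdp = (bdp - 1 < ring) ? bdp - 1 + ring_size : bdp - 1;
	bdp->cbd_sc |= SC_WRAP;

	printf("wrap bit set on entry %ld\n", (long)(bdp - ring));	/* 3 */
	return 0;
}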
@@ -510,10 +542,10 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 	else
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 
 
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -727,10 +759,7 @@ fec_enet_tx(struct net_device *ndev)
 	bdp = fep->dirty_tx;
 
 	/* get next bdp of dirty_tx */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -800,10 +829,7 @@ fec_enet_tx(struct net_device *ndev)
 		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
-		if (status & BD_ENET_TX_WRAP)
-			bdp = fep->tx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
@@ -993,10 +1019,8 @@ rx_processing_done:
 		}
 
 		/* Update BD pointer to next entry */
-		if (status & BD_ENET_RX_WRAP)
-			bdp = fep->rx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+
 		/* Doing this here will keep the FEC running while we process
 		 * incoming frames. On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
@@ -1662,7 +1686,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1694,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1710,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1727,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
 		bdp->cbd_sc = 0;
@@ -1722,11 +1746,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
@@ -1966,13 +1990,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 
+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
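Note: the last hunk shows why the RX/TX split keeps working after the change. Both rings live in the single allocation at cbd_base, RX first, so the TX base is simply rx_ring_size descriptors (of whichever layout is in use) past the start, and the fec_restart() hunk programs FEC_X_DES_START with the same offset expressed in bytes. Below is a standalone sketch of that pointer arithmetic; the descriptor sizes and ring size are made-up illustration values, not the driver's.

#include <stdio.h>

/* Stand-in descriptor layouts; the real structs differ. */
struct bufdesc    { unsigned char pad[8];  };
struct bufdesc_ex { unsigned char pad[32]; };

int main(void)
{
	struct bufdesc_ex pool[16];		/* one shared RX+TX allocation */
	struct bufdesc *cbd_base = (struct bufdesc *)pool;
	struct bufdesc *tx_bd_base;
	int rx_ring_size = 8;
	int bufdesc_ex = 1;			/* extended descriptors in use? */

	/* TX descriptors start right after rx_ring_size RX descriptors. */
	if (bufdesc_ex)
		tx_bd_base = (struct bufdesc *)
			(((struct bufdesc_ex *)cbd_base) + rx_ring_size);
	else
		tx_bd_base = cbd_base + rx_ring_size;

	/* Same offset, in bytes, as would be written to the TX ring register. */
	printf("TX ring starts %ld bytes after the RX ring\n",
	       (long)((char *)tx_bd_base - (char *)cbd_base));
	return 0;
}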