author     David S. Miller <davem@davemloft.net>  2013-09-05 14:58:52 -0400
committer  David S. Miller <davem@davemloft.net>  2013-09-05 14:58:52 -0400
commit     06c54055bebf919249aa1eb68312887c3cfe77b4 (patch)
tree       223a49c09e5d26516ed0161b8a52d08454ae028e /drivers/net/ethernet/freescale
parent     1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f (diff)
parent     e2e5c4c07caf810d7849658dca42f598b3938e21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
        drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
        net/bridge/br_multicast.c
        net/ipv6/sit.c

The conflicts were minor:

1) The sit.c changes overlap with the change to the ip_tunnel_xmit()
   signature.

2) br_multicast.c had an overlap between computing max_delay using
   msecs_to_jiffies and turning MLDV2_MRC() into an inline function
   with a name using lowercase instead of uppercase letters.

3) stmmac had two overlapping changes: one which conditionally allocated
   and hooked up a dma_cfg based upon the presence of the pbl OF property,
   and another handling store-and-forward DMA mode. The latter should not
   go into the new of_find_property() basic block.

Signed-off-by: David S. Miller <davem@davemloft.net>
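To make item 3 concrete, here is a minimal C sketch of the shape such a resolution could take in the stmmac platform probe path; the variable names (np, pdev, plat, dma_cfg) and property strings ("snps,pbl", "snps,force_sf_dma_mode") are assumptions for illustration and are not taken from this diff:

        /* Hypothetical resolution sketch: dma_cfg is only allocated and
         * hooked up inside the new of_find_property() block for the pbl
         * property, while the store-and-forward DMA mode flag is read
         * outside of that block. */
        if (of_find_property(np, "snps,pbl", NULL)) {
                dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
                                       GFP_KERNEL);
                if (!dma_cfg)
                        return -ENOMEM;
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
        }

        /* Store-and-forward mode handling does not depend on pbl. */
        plat->force_sf_dma_mode =
                of_property_read_bool(np, "snps,force_sf_dma_mode");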
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/fec.h       |   3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  | 123
2 files changed, 78 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
         /* The ring entries to be free()ed */
         struct bufdesc  *dirty_tx;
 
+        unsigned short tx_ring_size;
+        unsigned short rx_ring_size;
+
         struct  platform_device *pdev;
 
         int     opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 0cd5e4b8b545..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-        if (is_ex)
-                return (struct bufdesc *)(ex + 1);
+        struct bufdesc *new_bd = bdp + 1;
+        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+        struct bufdesc_ex *ex_base;
+        struct bufdesc *base;
+        int ring_size;
+
+        if (bdp >= fep->tx_bd_base) {
+                base = fep->tx_bd_base;
+                ring_size = fep->tx_ring_size;
+                ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+        } else {
+                base = fep->rx_bd_base;
+                ring_size = fep->rx_ring_size;
+                ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+        }
+
+        if (fep->bufdesc_ex)
+                return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+                        ex_base : ex_new_bd);
         else
-                return bdp + 1;
+                return (new_bd >= (base + ring_size)) ?
+                        base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-        if (is_ex)
-                return (struct bufdesc *)(ex - 1);
+        struct bufdesc *new_bd = bdp - 1;
+        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+        struct bufdesc_ex *ex_base;
+        struct bufdesc *base;
+        int ring_size;
+
+        if (bdp >= fep->tx_bd_base) {
+                base = fep->tx_bd_base;
+                ring_size = fep->tx_ring_size;
+                ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+        } else {
+                base = fep->rx_bd_base;
+                ring_size = fep->rx_ring_size;
+                ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+        }
+
+        if (fep->bufdesc_ex)
+                return (struct bufdesc *)((ex_new_bd < ex_base) ?
+                        (ex_new_bd + ring_size) : ex_new_bd);
         else
-                return bdp - 1;
+                return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
@@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 }
         }
 
-        bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp_pre = fec_enet_get_prevdesc(bdp, fep);
         if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
             !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
                 fep->delay_work.trig_tx = true;
@@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         }
 
         /* If this was the last BD in the ring, start at the beginning again. */
-        if (status & BD_ENET_TX_WRAP)
-                bdp = fep->tx_bd_base;
-        else
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_nextdesc(bdp, fep);
 
         fep->cur_tx = bdp;
 
@@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
         /* Initialize the receive buffer descriptors. */
         bdp = fep->rx_bd_base;
-        for (i = 0; i < RX_RING_SIZE; i++) {
+        for (i = 0; i < fep->rx_ring_size; i++) {
 
                 /* Initialize the BD for every fragment in the page. */
                 if (bdp->cbd_bufaddr)
                         bdp->cbd_sc = BD_ENET_RX_EMPTY;
                 else
                         bdp->cbd_sc = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
         }
 
         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         fep->cur_rx = fep->rx_bd_base;
@@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
         /* ...and the same for transmit */
         bdp = fep->tx_bd_base;
         fep->cur_tx = bdp;
-        for (i = 0; i < TX_RING_SIZE; i++) {
+        for (i = 0; i < fep->tx_ring_size; i++) {
 
                 /* Initialize the BD for every fragment in the page. */
                 bdp->cbd_sc = 0;
@@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
                         fep->tx_skbuff[i] = NULL;
                 }
                 bdp->cbd_bufaddr = 0;
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
         }
 
         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep);
         bdp->cbd_sc |= BD_SC_WRAP;
         fep->dirty_tx = bdp;
 }
@@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
         writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
         if (fep->bufdesc_ex)
                 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+                        * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
         else
                 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+                        * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 
 
         for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
         bdp = fep->dirty_tx;
 
         /* get next bdp of dirty_tx */
-        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-                bdp = fep->tx_bd_base;
-        else
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_nextdesc(bdp, fep);
 
         while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
                 fep->dirty_tx = bdp;
 
                 /* Update pointer to next buffer descriptor to be transmitted */
-                if (status & BD_ENET_TX_WRAP)
-                        bdp = fep->tx_bd_base;
-                else
-                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
 
                 /* Since we have freed up a buffer, the ring is no longer full
                  */
@@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
                                                htons(ETH_P_8021Q),
                                                vlan_tag);
 
-                        if (!skb_defer_rx_timestamp(skb))
-                                napi_gro_receive(&fep->napi, skb);
+                        napi_gro_receive(&fep->napi, skb);
                 }
 
                 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -993,10 +1018,8 @@ rx_processing_done:
                 }
 
                 /* Update BD pointer to next entry */
-                if (status & BD_ENET_RX_WRAP)
-                        bdp = fep->rx_bd_base;
-                else
-                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
+
                 /* Doing this here will keep the FEC running while we process
                  * incoming frames.  On a heavily loaded network, we should be
                  * able to keep up at the expense of system resources.
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
         struct bufdesc  *bdp;
 
         bdp = fep->rx_bd_base;
-        for (i = 0; i < RX_RING_SIZE; i++) {
+        for (i = 0; i < fep->rx_ring_size; i++) {
                 skb = fep->rx_skbuff[i];
 
                 if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                                 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                 if (skb)
                         dev_kfree_skb(skb);
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
         }
 
         bdp = fep->tx_bd_base;
-        for (i = 0; i < TX_RING_SIZE; i++)
+        for (i = 0; i < fep->tx_ring_size; i++)
                 kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
         struct bufdesc  *bdp;
 
         bdp = fep->rx_bd_base;
-        for (i = 0; i < RX_RING_SIZE; i++) {
+        for (i = 0; i < fep->rx_ring_size; i++) {
                 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
                 if (!skb) {
                         fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_RX_INT;
                 }
 
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
         }
 
         /* Set the last buffer to wrap. */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         bdp = fep->tx_bd_base;
-        for (i = 0; i < TX_RING_SIZE; i++) {
+        for (i = 0; i < fep->tx_ring_size; i++) {
                 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
                 bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_TX_INT;
                 }
 
-                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+                bdp = fec_enet_get_nextdesc(bdp, fep);
         }
 
         /* Set the last buffer to wrap. */
-        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+        bdp = fec_enet_get_prevdesc(bdp, fep);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
         /* Get the Ethernet address */
         fec_get_mac(ndev);
 
+        /* init the tx & rx ring size */
+        fep->tx_ring_size = TX_RING_SIZE;
+        fep->rx_ring_size = RX_RING_SIZE;
+
         /* Set receive and transmit descriptor base. */
         fep->rx_bd_base = cbd_base;
         if (fep->bufdesc_ex)
                 fep->tx_bd_base = (struct bufdesc *)
-                        (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+                        (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
         else
-                fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+                fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
         /* The FEC Ethernet specific entries in the device structure */
         ndev->watchdog_timeo = TX_TIMEOUT;