Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig            |   41
-rw-r--r--  drivers/net/Makefile           |    2
-rw-r--r--  drivers/net/bfin_mac.c         |  145
-rw-r--r--  drivers/net/bfin_mac.h         |    2
-rw-r--r--  drivers/net/davinci_cpdma.c    |  965
-rw-r--r--  drivers/net/davinci_cpdma.h    |  108
-rw-r--r--  drivers/net/davinci_emac.c     | 1338
-rw-r--r--  drivers/net/davinci_mdio.c     |  475
-rw-r--r--  drivers/net/mlx4/en_main.c     |   15
-rw-r--r--  drivers/net/mlx4/en_netdev.c   |   10
-rw-r--r--  drivers/net/mlx4/en_port.c     |    4
-rw-r--r--  drivers/net/mlx4/en_port.h     |    3
-rw-r--r--  drivers/net/mlx4/fw.c          |    3
-rw-r--r--  drivers/net/mlx4/intf.c        |   21
-rw-r--r--  drivers/net/mlx4/main.c        |    4
-rw-r--r--  drivers/net/mlx4/mlx4_en.h     |    1
-rw-r--r--  drivers/net/mlx4/port.c        |   19
-rw-r--r--  drivers/net/smsc911x.c         |    3
-rw-r--r--  drivers/net/smsc911x.h         |   11
-rw-r--r--  drivers/net/xen-netfront.c     |    2
-rw-r--r--  drivers/net/xilinx_emaclite.c  |    8
21 files changed, 1943 insertions(+), 1237 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f24179d98db9..f6668cdaac85 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -883,14 +883,6 @@ config BFIN_RX_DESC_NUM
 	help
 	  Set the number of buffer packets used in driver.
 
-config BFIN_MAC_RMII
-	bool "RMII PHY Interface"
-	depends on BFIN_MAC
-	default y if BFIN527_EZKIT
-	default n if BFIN537_STAMP
-	help
-	  Use Reduced PHY MII Interface
-
 config BFIN_MAC_USE_HWSTAMP
 	bool "Use IEEE 1588 hwstamp"
 	depends on BFIN_MAC && BF518
@@ -954,6 +946,8 @@ config NET_NETX
 config TI_DAVINCI_EMAC
 	tristate "TI DaVinci EMAC Support"
 	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select TI_DAVINCI_MDIO
+	select TI_DAVINCI_CPDMA
 	select PHYLIB
 	help
 	  This driver supports TI's DaVinci Ethernet .
@@ -961,6 +955,25 @@ config TI_DAVINCI_EMAC
 	  To compile this driver as a module, choose M here: the module
 	  will be called davinci_emac_driver. This is recommended.
 
+config TI_DAVINCI_MDIO
+	tristate "TI DaVinci MDIO Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select PHYLIB
+	help
+	  This driver supports TI's DaVinci MDIO module.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+	tristate "TI DaVinci CPDMA Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	help
+	  This driver supports TI's DaVinci CPDMA dma engine.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_cpdma. This is recommended.
+
 config DM9000
 	tristate "DM9000 support"
 	depends on ARM || BLACKFIN || MIPS
@@ -1028,7 +1041,7 @@ config SMC911X
 	tristate "SMSC LAN911[5678] support"
 	select CRC32
 	select MII
-	depends on ARM || SUPERH
+	depends on ARM || SUPERH || MN10300
 	help
 	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
 	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1042,7 +1055,7 @@ config SMC911X
 
 config SMSC911X
 	tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-	depends on ARM || SUPERH || BLACKFIN || MIPS
+	depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
 	select CRC32
 	select MII
 	select PHYLIB
@@ -1054,6 +1067,14 @@ config SMSC911X
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called smsc911x.
 
+config SMSC911X_ARCH_HOOKS
+	def_bool n
+	depends on SMSC911X
+	help
+	  If the arch enables this, it allows the arch to implement various
+	  hooks for more comprehensive interrupt control and also to override
+	  the source of the MAC address.
+
 config NET_VENDOR_RACAL
 	bool "Racal-Interlan (Micom) NI cards"
 	depends on ISA
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b8bf93d4a132..652fc6b98039 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
 obj-$(CONFIG_PHYLIB) += phy/
 
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index f7233191162b..ce1e5e9d06f6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1,7 +1,7 @@
 /*
  * Blackfin On-Chip MAC Driver
  *
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -23,7 +23,6 @@
 #include <linux/device.h>
 #include <linux/spinlock.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
@@ -76,12 +75,6 @@ static struct net_dma_desc_tx *current_tx_ptr;
 static struct net_dma_desc_tx *tx_desc;
 static struct net_dma_desc_rx *rx_desc;
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-static u16 pin_req[] = P_RMII0;
-#else
-static u16 pin_req[] = P_MII0;
-#endif
-
 static void desc_list_free(void)
 {
 	struct net_dma_desc_rx *r;
@@ -347,23 +340,23 @@ static void bfin_mac_adjust_link(struct net_device *dev)
 	}
 
 	if (phydev->speed != lp->old_speed) {
-#if defined(CONFIG_BFIN_MAC_RMII)
-		u32 opmode = bfin_read_EMAC_OPMODE();
-		switch (phydev->speed) {
-		case 10:
-			opmode |= RMII_10;
-			break;
-		case 100:
-			opmode &= ~(RMII_10);
-			break;
-		default:
-			printk(KERN_WARNING
-				"%s: Ack! Speed (%d) is not 10/100!\n",
-				DRV_NAME, phydev->speed);
-			break;
+		if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+			u32 opmode = bfin_read_EMAC_OPMODE();
+			switch (phydev->speed) {
+			case 10:
+				opmode |= RMII_10;
+				break;
+			case 100:
+				opmode &= ~RMII_10;
+				break;
+			default:
+				printk(KERN_WARNING
+					"%s: Ack! Speed (%d) is not 10/100!\n",
+					DRV_NAME, phydev->speed);
+				break;
+			}
+			bfin_write_EMAC_OPMODE(opmode);
 		}
-		bfin_write_EMAC_OPMODE(opmode);
-#endif
 
 		new_state = 1;
 		lp->old_speed = phydev->speed;
@@ -392,7 +385,7 @@ static void bfin_mac_adjust_link(struct net_device *dev)
 /* MDC = 2.5 MHz */
 #define MDC_CLK		2500000
 
-static int mii_probe(struct net_device *dev)
+static int mii_probe(struct net_device *dev, int phy_mode)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
@@ -411,8 +404,8 @@ static int mii_probe(struct net_device *dev)
 	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
 	bfin_write_EMAC_SYSCTL(sysctl);
 
-	/* search for connect PHY device */
-	for (i = 0; i < PHY_MAX_ADDR; i++) {
+	/* search for connected PHY device */
+	for (i = 0; i < PHY_MAX_ADDR; ++i) {
 		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
 
 		if (!tmp_phydev)
@@ -429,13 +422,14 @@ static int mii_probe(struct net_device *dev)
 		return -ENODEV;
 	}
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-	phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
-			0, PHY_INTERFACE_MODE_RMII);
-#else
+	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
+	    phy_mode != PHY_INTERFACE_MODE_MII) {
+		printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+		return -EINVAL;
+	}
+
 	phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
-			0, PHY_INTERFACE_MODE_MII);
-#endif
+			0, phy_mode);
 
 	if (IS_ERR(phydev)) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -570,6 +564,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
 /**************************************************************************/
 void setup_system_regs(struct net_device *dev)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+	int i;
 	unsigned short sysctl;
 
 	/*
@@ -577,6 +573,15 @@ void setup_system_regs(struct net_device *dev)
 	 * Configure checksum support and rcve frame word alignment
 	 */
 	sysctl = bfin_read_EMAC_SYSCTL();
+	/*
+	 * check if interrupt is requested for any PHY,
+	 * enable PHY interrupt only if needed
+	 */
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		if (lp->mii_bus->irq[i] != PHY_POLL)
+			break;
+	if (i < PHY_MAX_ADDR)
+		sysctl |= PHYIE;
 	sysctl |= RXDWA;
 #if defined(BFIN_MAC_CSUM_OFFLOAD)
 	sysctl |= RXCKS;
@@ -1203,7 +1208,7 @@ static void bfin_mac_disable(void)
 /*
  * Enable Interrupts, Receive, and Transmit
  */
-static int bfin_mac_enable(void)
+static int bfin_mac_enable(struct phy_device *phydev)
 {
 	int ret;
 	u32 opmode;
@@ -1233,12 +1238,13 @@ static int bfin_mac_enable(void)
 	opmode |= DRO | DC | PSF;
 	opmode |= RE;
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-	opmode |= RMII; /* For Now only 100MBit are supported */
+	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+		opmode |= RMII; /* For Now only 100MBit are supported */
 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
 	opmode |= TE;
-#endif
 #endif
+	}
+
 	/* Turn on the EMAC rx */
 	bfin_write_EMAC_OPMODE(opmode);
 
@@ -1270,7 +1276,7 @@ static void bfin_mac_timeout(struct net_device *dev)
 	if (netif_queue_stopped(lp->ndev))
 		netif_wake_queue(lp->ndev);
 
-	bfin_mac_enable();
+	bfin_mac_enable(lp->phydev);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1342,11 +1348,19 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
 
 static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+	struct bfin_mac_local *lp = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
 		return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
 	default:
-		return -EOPNOTSUPP;
+		if (lp->phydev)
+			return phy_mii_ioctl(lp->phydev, ifr, cmd);
+		else
+			return -EOPNOTSUPP;
 	}
 }
 
@@ -1394,7 +1408,7 @@ static int bfin_mac_open(struct net_device *dev)
 	setup_mac_addr(dev->dev_addr);
 
 	bfin_mac_disable();
-	ret = bfin_mac_enable();
+	ret = bfin_mac_enable(lp->phydev);
 	if (ret)
 		return ret;
 	pr_debug("hardware init finished\n");
@@ -1450,6 +1464,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	struct bfin_mac_local *lp;
 	struct platform_device *pd;
+	struct bfin_mii_bus_platform_data *mii_bus_data;
 	int rc;
 
 	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
@@ -1501,11 +1516,12 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
 	if (!lp->mii_bus) {
 		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
 		rc = -ENODEV;
-		goto out_err_mii_bus_probe;
+		goto out_err_probe_mac;
 	}
 	lp->mii_bus->priv = ndev;
+	mii_bus_data = pd->dev.platform_data;
 
-	rc = mii_probe(ndev);
+	rc = mii_probe(ndev, mii_bus_data->phy_mode);
 	if (rc) {
 		dev_err(&pdev->dev, "MII Probe failed!\n");
 		goto out_err_mii_probe;
@@ -1552,8 +1568,6 @@ out_err_request_irq:
 out_err_mii_probe:
 	mdiobus_unregister(lp->mii_bus);
 	mdiobus_free(lp->mii_bus);
-out_err_mii_bus_probe:
-	peripheral_free_list(pin_req);
 out_err_probe_mac:
 	platform_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
@@ -1576,8 +1590,6 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
 
 	free_netdev(ndev);
 
-	peripheral_free_list(pin_req);
-
 	return 0;
 }
 
@@ -1623,12 +1635,21 @@ static int bfin_mac_resume(struct platform_device *pdev)
 static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 {
 	struct mii_bus *miibus;
+	struct bfin_mii_bus_platform_data *mii_bus_pd;
+	const unsigned short *pin_req;
 	int rc, i;
 
+	mii_bus_pd = dev_get_platdata(&pdev->dev);
+	if (!mii_bus_pd) {
+		dev_err(&pdev->dev, "No peripherals in platform data!\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * We are setting up a network card,
 	 * so set the GPIO pins to Ethernet mode
 	 */
+	pin_req = mii_bus_pd->mac_peripherals;
 	rc = peripheral_request_list(pin_req, DRV_NAME);
 	if (rc) {
 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
@@ -1645,13 +1666,30 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 
 	miibus->parent = &pdev->dev;
 	miibus->name = "bfin_mii_bus";
+	miibus->phy_mask = mii_bus_pd->phy_mask;
+
 	snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
 	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
-	if (miibus->irq == NULL)
-		goto out_err_alloc;
-	for (i = 0; i < PHY_MAX_ADDR; ++i)
+	if (!miibus->irq)
+		goto out_err_irq_alloc;
+
+	for (i = rc; i < PHY_MAX_ADDR; ++i)
 		miibus->irq[i] = PHY_POLL;
 
+	rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
+	if (rc != mii_bus_pd->phydev_number)
+		dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
+			mii_bus_pd->phydev_number);
+	for (i = 0; i < rc; ++i) {
+		unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
+		if (phyaddr < PHY_MAX_ADDR)
+			miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
+		else
+			dev_err(&pdev->dev,
+				"Invalid PHY address %i for phydev %i\n",
+				phyaddr, i);
+	}
+
 	rc = mdiobus_register(miibus);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
@@ -1663,6 +1701,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 
 out_err_mdiobus_register:
 	kfree(miibus->irq);
+out_err_irq_alloc:
 	mdiobus_free(miibus);
 out_err_alloc:
 	peripheral_free_list(pin_req);
@@ -1673,11 +1712,15 @@ out_err_alloc:
 static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
 {
 	struct mii_bus *miibus = platform_get_drvdata(pdev);
+	struct bfin_mii_bus_platform_data *mii_bus_pd =
+		dev_get_platdata(&pdev->dev);
+
 	platform_set_drvdata(pdev, NULL);
 	mdiobus_unregister(miibus);
 	kfree(miibus->irq);
 	mdiobus_free(miibus);
-	peripheral_free_list(pin_req);
+	peripheral_free_list(mii_bus_pd->mac_peripherals);
+
 	return 0;
 }
 
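The bfin_mac changes above replace the CONFIG_BFIN_MAC_RMII build-time switch with per-board platform data: the PHY interface mode, the MII/RMII pin list, and a per-PHY IRQ table all come from the bfin_mii_bus platform device. Because this diff is limited to drivers/net, the platform-data structure itself (added under include/linux/bfin_mac.h) is not shown; judging only from the fields the driver dereferences above (phy_mode, mac_peripherals, phy_mask, phydev_number, phydev_data[].addr/.irq), it presumably looks roughly like the following sketch, where field order and exact types are assumptions:

/* Sketch only: inferred from the fields the probe code dereferences;
 * the authoritative definition lives in include/linux/bfin_mac.h. */
struct bfin_phydev_platform_data {
	unsigned short addr;	/* PHY address on the MDIO bus */
	int irq;		/* PHY interrupt line, or PHY_POLL */
};

struct bfin_mii_bus_platform_data {
	int phydev_number;			/* entries in phydev_data[] */
	struct bfin_phydev_platform_data *phydev_data;
	const unsigned short *mac_peripherals;	/* P_MII0/P_RMII0-style pin list */
	int phy_mode;				/* PHY_INTERFACE_MODE_MII or _RMII */
	unsigned int phy_mask;
};

A board file would fill this in and attach it as dev.platform_data of the bfin_mii_bus platform device, which is what dev_get_platdata() returns in bfin_mii_bus_probe() and what pd->dev.platform_data supplies to mii_probe() in bfin_mac_probe().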
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 04e4050df18b..aed68bed2365 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -14,6 +14,8 @@
 #include <linux/clocksource.h>
 #include <linux/timecompare.h>
 #include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include <linux/bfin_mac.h>
 
 #define BFIN_MAC_CSUM_OFFLOAD
 
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 000000000000..e92b2b6cd8c4
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
1/*
2 * Texas Instruments CPDMA Driver
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/dma-mapping.h>
21#include <linux/io.h>
22
23#include "davinci_cpdma.h"
24
25/* DMA Registers */
26#define CPDMA_TXIDVER 0x00
27#define CPDMA_TXCONTROL 0x04
28#define CPDMA_TXTEARDOWN 0x08
29#define CPDMA_RXIDVER 0x10
30#define CPDMA_RXCONTROL 0x14
31#define CPDMA_SOFTRESET 0x1c
32#define CPDMA_RXTEARDOWN 0x18
33#define CPDMA_TXINTSTATRAW 0x80
34#define CPDMA_TXINTSTATMASKED 0x84
35#define CPDMA_TXINTMASKSET 0x88
36#define CPDMA_TXINTMASKCLEAR 0x8c
37#define CPDMA_MACINVECTOR 0x90
38#define CPDMA_MACEOIVECTOR 0x94
39#define CPDMA_RXINTSTATRAW 0xa0
40#define CPDMA_RXINTSTATMASKED 0xa4
41#define CPDMA_RXINTMASKSET 0xa8
42#define CPDMA_RXINTMASKCLEAR 0xac
43#define CPDMA_DMAINTSTATRAW 0xb0
44#define CPDMA_DMAINTSTATMASKED 0xb4
45#define CPDMA_DMAINTMASKSET 0xb8
46#define CPDMA_DMAINTMASKCLEAR 0xbc
47#define CPDMA_DMAINT_HOSTERR BIT(1)
48
49/* the following exist only if has_ext_regs is set */
50#define CPDMA_DMACONTROL 0x20
51#define CPDMA_DMASTATUS 0x24
52#define CPDMA_RXBUFFOFS 0x28
53#define CPDMA_EM_CONTROL 0x2c
54
55/* Descriptor mode bits */
56#define CPDMA_DESC_SOP BIT(31)
57#define CPDMA_DESC_EOP BIT(30)
58#define CPDMA_DESC_OWNER BIT(29)
59#define CPDMA_DESC_EOQ BIT(28)
60#define CPDMA_DESC_TD_COMPLETE BIT(27)
61#define CPDMA_DESC_PASS_CRC BIT(26)
62
63#define CPDMA_TEARDOWN_VALUE 0xfffffffc
64
65struct cpdma_desc {
66 /* hardware fields */
67 u32 hw_next;
68 u32 hw_buffer;
69 u32 hw_len;
70 u32 hw_mode;
71 /* software fields */
72 void *sw_token;
73 u32 sw_buffer;
74 u32 sw_len;
75};
76
77struct cpdma_desc_pool {
78 u32 phys;
79 void __iomem *iomap; /* ioremap map */
80 void *cpumap; /* dma_alloc map */
81 int desc_size, mem_size;
82 int num_desc, used_desc;
83 unsigned long *bitmap;
84 struct device *dev;
85 spinlock_t lock;
86};
87
88enum cpdma_state {
89 CPDMA_STATE_IDLE,
90 CPDMA_STATE_ACTIVE,
91 CPDMA_STATE_TEARDOWN,
92};
93
94const char *cpdma_state_str[] = { "idle", "active", "teardown" };
95
96struct cpdma_ctlr {
97 enum cpdma_state state;
98 struct cpdma_params params;
99 struct device *dev;
100 struct cpdma_desc_pool *pool;
101 spinlock_t lock;
102 struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
103};
104
105struct cpdma_chan {
106 enum cpdma_state state;
107 struct cpdma_ctlr *ctlr;
108 int chan_num;
109 spinlock_t lock;
110 struct cpdma_desc __iomem *head, *tail;
111 int count;
112 void __iomem *hdp, *cp, *rxfree;
113 u32 mask;
114 cpdma_handler_fn handler;
115 enum dma_data_direction dir;
116 struct cpdma_chan_stats stats;
117 /* offsets into dmaregs */
118 int int_set, int_clear, td;
119};
120
121/* The following make access to common cpdma_ctlr params more readable */
122#define dmaregs params.dmaregs
123#define num_chan params.num_chan
124
125/* various accessors */
126#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
127#define chan_read(chan, fld) __raw_readl((chan)->fld)
128#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
129#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
130#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
131#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
132
133/*
134 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
135 * emac) have dedicated on-chip memory for these descriptors. Some other
136 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
137 * abstract out these details
138 */
139static struct cpdma_desc_pool *
140cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
141{
142 int bitmap_size;
143 struct cpdma_desc_pool *pool;
144
145 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
146 if (!pool)
147 return NULL;
148
149 spin_lock_init(&pool->lock);
150
151 pool->dev = dev;
152 pool->mem_size = size;
153 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
154 pool->num_desc = size / pool->desc_size;
155
156 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
157 pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
158 if (!pool->bitmap)
159 goto fail;
160
161 if (phys) {
162 pool->phys = phys;
163 pool->iomap = ioremap(phys, size);
164 } else {
165 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
166 GFP_KERNEL);
167 pool->iomap = (void __force __iomem *)pool->cpumap;
168 }
169
170 if (pool->iomap)
171 return pool;
172
173fail:
174 kfree(pool->bitmap);
175 kfree(pool);
176 return NULL;
177}
178
179static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
180{
181 unsigned long flags;
182
183 if (!pool)
184 return;
185
186 spin_lock_irqsave(&pool->lock, flags);
187 WARN_ON(pool->used_desc);
188 kfree(pool->bitmap);
189 if (pool->cpumap) {
190 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
191 pool->phys);
192 } else {
193 iounmap(pool->iomap);
194 }
195 spin_unlock_irqrestore(&pool->lock, flags);
196 kfree(pool);
197}
198
199static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
200 struct cpdma_desc __iomem *desc)
201{
202 if (!desc)
203 return 0;
204 return pool->phys + (__force dma_addr_t)desc -
205 (__force dma_addr_t)pool->iomap;
206}
207
208static inline struct cpdma_desc __iomem *
209desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
210{
211 return dma ? pool->iomap + dma - pool->phys : NULL;
212}
213
214static struct cpdma_desc __iomem *
215cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
216{
217 unsigned long flags;
218 int index;
219 struct cpdma_desc __iomem *desc = NULL;
220
221 spin_lock_irqsave(&pool->lock, flags);
222
223 index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
224 num_desc, 0);
225 if (index < pool->num_desc) {
226 bitmap_set(pool->bitmap, index, num_desc);
227 desc = pool->iomap + pool->desc_size * index;
228 pool->used_desc++;
229 }
230
231 spin_unlock_irqrestore(&pool->lock, flags);
232 return desc;
233}
234
235static void cpdma_desc_free(struct cpdma_desc_pool *pool,
236 struct cpdma_desc __iomem *desc, int num_desc)
237{
238 unsigned long flags, index;
239
240 index = ((unsigned long)desc - (unsigned long)pool->iomap) /
241 pool->desc_size;
242 spin_lock_irqsave(&pool->lock, flags);
243 bitmap_clear(pool->bitmap, index, num_desc);
244 pool->used_desc--;
245 spin_unlock_irqrestore(&pool->lock, flags);
246}
247
248struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
249{
250 struct cpdma_ctlr *ctlr;
251
252 ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
253 if (!ctlr)
254 return NULL;
255
256 ctlr->state = CPDMA_STATE_IDLE;
257 ctlr->params = *params;
258 ctlr->dev = params->dev;
259 spin_lock_init(&ctlr->lock);
260
261 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
262 ctlr->params.desc_mem_phys,
263 ctlr->params.desc_mem_size,
264 ctlr->params.desc_align);
265 if (!ctlr->pool) {
266 kfree(ctlr);
267 return NULL;
268 }
269
270 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
271 ctlr->num_chan = CPDMA_MAX_CHANNELS;
272 return ctlr;
273}
274
275int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
276{
277 unsigned long flags;
278 int i;
279
280 spin_lock_irqsave(&ctlr->lock, flags);
281 if (ctlr->state != CPDMA_STATE_IDLE) {
282 spin_unlock_irqrestore(&ctlr->lock, flags);
283 return -EBUSY;
284 }
285
286 if (ctlr->params.has_soft_reset) {
287 unsigned long timeout = jiffies + HZ/10;
288
289 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
290 while (time_before(jiffies, timeout)) {
291 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
292 break;
293 }
294 WARN_ON(!time_before(jiffies, timeout));
295 }
296
297 for (i = 0; i < ctlr->num_chan; i++) {
298 __raw_writel(0, ctlr->params.txhdp + 4 * i);
299 __raw_writel(0, ctlr->params.rxhdp + 4 * i);
300 __raw_writel(0, ctlr->params.txcp + 4 * i);
301 __raw_writel(0, ctlr->params.rxcp + 4 * i);
302 }
303
304 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
305 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
306
307 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
308 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
309
310 ctlr->state = CPDMA_STATE_ACTIVE;
311
312 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
313 if (ctlr->channels[i])
314 cpdma_chan_start(ctlr->channels[i]);
315 }
316 spin_unlock_irqrestore(&ctlr->lock, flags);
317 return 0;
318}
319
320int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
321{
322 unsigned long flags;
323 int i;
324
325 spin_lock_irqsave(&ctlr->lock, flags);
326 if (ctlr->state != CPDMA_STATE_ACTIVE) {
327 spin_unlock_irqrestore(&ctlr->lock, flags);
328 return -EINVAL;
329 }
330
331 ctlr->state = CPDMA_STATE_TEARDOWN;
332
333 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
334 if (ctlr->channels[i])
335 cpdma_chan_stop(ctlr->channels[i]);
336 }
337
338 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
339 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
340
341 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
342 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
343
344 ctlr->state = CPDMA_STATE_IDLE;
345
346 spin_unlock_irqrestore(&ctlr->lock, flags);
347 return 0;
348}
349
350int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
351{
352 struct device *dev = ctlr->dev;
353 unsigned long flags;
354 int i;
355
356 spin_lock_irqsave(&ctlr->lock, flags);
357
358 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
359
360 dev_info(dev, "CPDMA: txidver: %x",
361 dma_reg_read(ctlr, CPDMA_TXIDVER));
362 dev_info(dev, "CPDMA: txcontrol: %x",
363 dma_reg_read(ctlr, CPDMA_TXCONTROL));
364 dev_info(dev, "CPDMA: txteardown: %x",
365 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
366 dev_info(dev, "CPDMA: rxidver: %x",
367 dma_reg_read(ctlr, CPDMA_RXIDVER));
368 dev_info(dev, "CPDMA: rxcontrol: %x",
369 dma_reg_read(ctlr, CPDMA_RXCONTROL));
370 dev_info(dev, "CPDMA: softreset: %x",
371 dma_reg_read(ctlr, CPDMA_SOFTRESET));
372 dev_info(dev, "CPDMA: rxteardown: %x",
373 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
374 dev_info(dev, "CPDMA: txintstatraw: %x",
375 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
376 dev_info(dev, "CPDMA: txintstatmasked: %x",
377 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
378 dev_info(dev, "CPDMA: txintmaskset: %x",
379 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
380 dev_info(dev, "CPDMA: txintmaskclear: %x",
381 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
382 dev_info(dev, "CPDMA: macinvector: %x",
383 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
384 dev_info(dev, "CPDMA: maceoivector: %x",
385 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
386 dev_info(dev, "CPDMA: rxintstatraw: %x",
387 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
388 dev_info(dev, "CPDMA: rxintstatmasked: %x",
389 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
390 dev_info(dev, "CPDMA: rxintmaskset: %x",
391 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
392 dev_info(dev, "CPDMA: rxintmaskclear: %x",
393 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
394 dev_info(dev, "CPDMA: dmaintstatraw: %x",
395 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
396 dev_info(dev, "CPDMA: dmaintstatmasked: %x",
397 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
398 dev_info(dev, "CPDMA: dmaintmaskset: %x",
399 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
400 dev_info(dev, "CPDMA: dmaintmaskclear: %x",
401 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
402
403 if (!ctlr->params.has_ext_regs) {
404 dev_info(dev, "CPDMA: dmacontrol: %x",
405 dma_reg_read(ctlr, CPDMA_DMACONTROL));
406 dev_info(dev, "CPDMA: dmastatus: %x",
407 dma_reg_read(ctlr, CPDMA_DMASTATUS));
408 dev_info(dev, "CPDMA: rxbuffofs: %x",
409 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
410 }
411
412 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
413 if (ctlr->channels[i])
414 cpdma_chan_dump(ctlr->channels[i]);
415
416 spin_unlock_irqrestore(&ctlr->lock, flags);
417 return 0;
418}
419
420int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
421{
422 unsigned long flags;
423 int ret = 0, i;
424
425 if (!ctlr)
426 return -EINVAL;
427
428 spin_lock_irqsave(&ctlr->lock, flags);
429 if (ctlr->state != CPDMA_STATE_IDLE)
430 cpdma_ctlr_stop(ctlr);
431
432 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
433 if (ctlr->channels[i])
434 cpdma_chan_destroy(ctlr->channels[i]);
435 }
436
437 cpdma_desc_pool_destroy(ctlr->pool);
438 spin_unlock_irqrestore(&ctlr->lock, flags);
439 kfree(ctlr);
440 return ret;
441}
442
443int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
444{
445 unsigned long flags;
446 int i, reg;
447
448 spin_lock_irqsave(&ctlr->lock, flags);
449 if (ctlr->state != CPDMA_STATE_ACTIVE) {
450 spin_unlock_irqrestore(&ctlr->lock, flags);
451 return -EINVAL;
452 }
453
454 reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
455 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
456
457 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
458 if (ctlr->channels[i])
459 cpdma_chan_int_ctrl(ctlr->channels[i], enable);
460 }
461
462 spin_unlock_irqrestore(&ctlr->lock, flags);
463 return 0;
464}
465
466void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
467{
468 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
469}
470
471struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
472 cpdma_handler_fn handler)
473{
474 struct cpdma_chan *chan;
475 int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
476 unsigned long flags;
477
478 if (__chan_linear(chan_num) >= ctlr->num_chan)
479 return NULL;
480
481 ret = -ENOMEM;
482 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
483 if (!chan)
484 goto err_chan_alloc;
485
486 spin_lock_irqsave(&ctlr->lock, flags);
487 ret = -EBUSY;
488 if (ctlr->channels[chan_num])
489 goto err_chan_busy;
490
491 chan->ctlr = ctlr;
492 chan->state = CPDMA_STATE_IDLE;
493 chan->chan_num = chan_num;
494 chan->handler = handler;
495
496 if (is_rx_chan(chan)) {
497 chan->hdp = ctlr->params.rxhdp + offset;
498 chan->cp = ctlr->params.rxcp + offset;
499 chan->rxfree = ctlr->params.rxfree + offset;
500 chan->int_set = CPDMA_RXINTMASKSET;
501 chan->int_clear = CPDMA_RXINTMASKCLEAR;
502 chan->td = CPDMA_RXTEARDOWN;
503 chan->dir = DMA_FROM_DEVICE;
504 } else {
505 chan->hdp = ctlr->params.txhdp + offset;
506 chan->cp = ctlr->params.txcp + offset;
507 chan->int_set = CPDMA_TXINTMASKSET;
508 chan->int_clear = CPDMA_TXINTMASKCLEAR;
509 chan->td = CPDMA_TXTEARDOWN;
510 chan->dir = DMA_TO_DEVICE;
511 }
512 chan->mask = BIT(chan_linear(chan));
513
514 spin_lock_init(&chan->lock);
515
516 ctlr->channels[chan_num] = chan;
517 spin_unlock_irqrestore(&ctlr->lock, flags);
518 return chan;
519
520err_chan_busy:
521 spin_unlock_irqrestore(&ctlr->lock, flags);
522 kfree(chan);
523err_chan_alloc:
524 return ERR_PTR(ret);
525}
526
527int cpdma_chan_destroy(struct cpdma_chan *chan)
528{
529 struct cpdma_ctlr *ctlr = chan->ctlr;
530 unsigned long flags;
531
532 if (!chan)
533 return -EINVAL;
534
535 spin_lock_irqsave(&ctlr->lock, flags);
536 if (chan->state != CPDMA_STATE_IDLE)
537 cpdma_chan_stop(chan);
538 ctlr->channels[chan->chan_num] = NULL;
539 spin_unlock_irqrestore(&ctlr->lock, flags);
540 kfree(chan);
541 return 0;
542}
543
544int cpdma_chan_get_stats(struct cpdma_chan *chan,
545 struct cpdma_chan_stats *stats)
546{
547 unsigned long flags;
548 if (!chan)
549 return -EINVAL;
550 spin_lock_irqsave(&chan->lock, flags);
551 memcpy(stats, &chan->stats, sizeof(*stats));
552 spin_unlock_irqrestore(&chan->lock, flags);
553 return 0;
554}
555
556int cpdma_chan_dump(struct cpdma_chan *chan)
557{
558 unsigned long flags;
559 struct device *dev = chan->ctlr->dev;
560
561 spin_lock_irqsave(&chan->lock, flags);
562
563 dev_info(dev, "channel %d (%s %d) state %s",
564 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
565 chan_linear(chan), cpdma_state_str[chan->state]);
566 dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
567 dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
568 if (chan->rxfree) {
569 dev_info(dev, "\trxfree: %x\n",
570 chan_read(chan, rxfree));
571 }
572
573 dev_info(dev, "\tstats head_enqueue: %d\n",
574 chan->stats.head_enqueue);
575 dev_info(dev, "\tstats tail_enqueue: %d\n",
576 chan->stats.tail_enqueue);
577 dev_info(dev, "\tstats pad_enqueue: %d\n",
578 chan->stats.pad_enqueue);
579 dev_info(dev, "\tstats misqueued: %d\n",
580 chan->stats.misqueued);
581 dev_info(dev, "\tstats desc_alloc_fail: %d\n",
582 chan->stats.desc_alloc_fail);
583 dev_info(dev, "\tstats pad_alloc_fail: %d\n",
584 chan->stats.pad_alloc_fail);
585 dev_info(dev, "\tstats runt_receive_buff: %d\n",
586 chan->stats.runt_receive_buff);
587 dev_info(dev, "\tstats runt_transmit_buff: %d\n",
588 chan->stats.runt_transmit_buff);
589 dev_info(dev, "\tstats empty_dequeue: %d\n",
590 chan->stats.empty_dequeue);
591 dev_info(dev, "\tstats busy_dequeue: %d\n",
592 chan->stats.busy_dequeue);
593 dev_info(dev, "\tstats good_dequeue: %d\n",
594 chan->stats.good_dequeue);
595 dev_info(dev, "\tstats requeue: %d\n",
596 chan->stats.requeue);
597 dev_info(dev, "\tstats teardown_dequeue: %d\n",
598 chan->stats.teardown_dequeue);
599
600 spin_unlock_irqrestore(&chan->lock, flags);
601 return 0;
602}
603
604static void __cpdma_chan_submit(struct cpdma_chan *chan,
605 struct cpdma_desc __iomem *desc)
606{
607 struct cpdma_ctlr *ctlr = chan->ctlr;
608 struct cpdma_desc __iomem *prev = chan->tail;
609 struct cpdma_desc_pool *pool = ctlr->pool;
610 dma_addr_t desc_dma;
611 u32 mode;
612
613 desc_dma = desc_phys(pool, desc);
614
615 /* simple case - idle channel */
616 if (!chan->head) {
617 chan->stats.head_enqueue++;
618 chan->head = desc;
619 chan->tail = desc;
620 if (chan->state == CPDMA_STATE_ACTIVE)
621 chan_write(chan, hdp, desc_dma);
622 return;
623 }
624
625 /* first chain the descriptor at the tail of the list */
626 desc_write(prev, hw_next, desc_dma);
627 chan->tail = desc;
628 chan->stats.tail_enqueue++;
629
630 /* next check if EOQ has been triggered already */
631 mode = desc_read(prev, hw_mode);
632 if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
633 (chan->state == CPDMA_STATE_ACTIVE)) {
634 desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
635 chan_write(chan, hdp, desc_dma);
636 chan->stats.misqueued++;
637 }
638}
639
640int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
641 int len, gfp_t gfp_mask)
642{
643 struct cpdma_ctlr *ctlr = chan->ctlr;
644 struct cpdma_desc __iomem *desc;
645 dma_addr_t buffer;
646 unsigned long flags;
647 u32 mode;
648 int ret = 0;
649
650 spin_lock_irqsave(&chan->lock, flags);
651
652 if (chan->state == CPDMA_STATE_TEARDOWN) {
653 ret = -EINVAL;
654 goto unlock_ret;
655 }
656
657 desc = cpdma_desc_alloc(ctlr->pool, 1);
658 if (!desc) {
659 chan->stats.desc_alloc_fail++;
660 ret = -ENOMEM;
661 goto unlock_ret;
662 }
663
664 if (len < ctlr->params.min_packet_size) {
665 len = ctlr->params.min_packet_size;
666 chan->stats.runt_transmit_buff++;
667 }
668
669 buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
670 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
671
672 desc_write(desc, hw_next, 0);
673 desc_write(desc, hw_buffer, buffer);
674 desc_write(desc, hw_len, len);
675 desc_write(desc, hw_mode, mode | len);
676 desc_write(desc, sw_token, token);
677 desc_write(desc, sw_buffer, buffer);
678 desc_write(desc, sw_len, len);
679
680 __cpdma_chan_submit(chan, desc);
681
682 if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
683 chan_write(chan, rxfree, 1);
684
685 chan->count++;
686
687unlock_ret:
688 spin_unlock_irqrestore(&chan->lock, flags);
689 return ret;
690}
691
692static void __cpdma_chan_free(struct cpdma_chan *chan,
693 struct cpdma_desc __iomem *desc,
694 int outlen, int status)
695{
696 struct cpdma_ctlr *ctlr = chan->ctlr;
697 struct cpdma_desc_pool *pool = ctlr->pool;
698 dma_addr_t buff_dma;
699 int origlen;
700 void *token;
701
702 token = (void *)desc_read(desc, sw_token);
703 buff_dma = desc_read(desc, sw_buffer);
704 origlen = desc_read(desc, sw_len);
705
706 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
707 cpdma_desc_free(pool, desc, 1);
708 (*chan->handler)(token, outlen, status);
709}
710
711static int __cpdma_chan_process(struct cpdma_chan *chan)
712{
713 struct cpdma_ctlr *ctlr = chan->ctlr;
714 struct cpdma_desc __iomem *desc;
715 int status, outlen;
716 struct cpdma_desc_pool *pool = ctlr->pool;
717 dma_addr_t desc_dma;
718 unsigned long flags;
719
720 spin_lock_irqsave(&chan->lock, flags);
721
722 desc = chan->head;
723 if (!desc) {
724 chan->stats.empty_dequeue++;
725 status = -ENOENT;
726 goto unlock_ret;
727 }
728 desc_dma = desc_phys(pool, desc);
729
730 status = __raw_readl(&desc->hw_mode);
731 outlen = status & 0x7ff;
732 if (status & CPDMA_DESC_OWNER) {
733 chan->stats.busy_dequeue++;
734 status = -EBUSY;
735 goto unlock_ret;
736 }
737 status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
738
739 chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
740 chan_write(chan, cp, desc_dma);
741 chan->count--;
742 chan->stats.good_dequeue++;
743
744 if (status & CPDMA_DESC_EOQ) {
745 chan->stats.requeue++;
746 chan_write(chan, hdp, desc_phys(pool, chan->head));
747 }
748
749 spin_unlock_irqrestore(&chan->lock, flags);
750
751 __cpdma_chan_free(chan, desc, outlen, status);
752 return status;
753
754unlock_ret:
755 spin_unlock_irqrestore(&chan->lock, flags);
756 return status;
757}
758
759int cpdma_chan_process(struct cpdma_chan *chan, int quota)
760{
761 int used = 0, ret = 0;
762
763 if (chan->state != CPDMA_STATE_ACTIVE)
764 return -EINVAL;
765
766 while (used < quota) {
767 ret = __cpdma_chan_process(chan);
768 if (ret < 0)
769 break;
770 used++;
771 }
772 return used;
773}
774
775int cpdma_chan_start(struct cpdma_chan *chan)
776{
777 struct cpdma_ctlr *ctlr = chan->ctlr;
778 struct cpdma_desc_pool *pool = ctlr->pool;
779 unsigned long flags;
780
781 spin_lock_irqsave(&chan->lock, flags);
782 if (chan->state != CPDMA_STATE_IDLE) {
783 spin_unlock_irqrestore(&chan->lock, flags);
784 return -EBUSY;
785 }
786 if (ctlr->state != CPDMA_STATE_ACTIVE) {
787 spin_unlock_irqrestore(&chan->lock, flags);
788 return -EINVAL;
789 }
790 dma_reg_write(ctlr, chan->int_set, chan->mask);
791 chan->state = CPDMA_STATE_ACTIVE;
792 if (chan->head) {
793 chan_write(chan, hdp, desc_phys(pool, chan->head));
794 if (chan->rxfree)
795 chan_write(chan, rxfree, chan->count);
796 }
797
798 spin_unlock_irqrestore(&chan->lock, flags);
799 return 0;
800}
801
802int cpdma_chan_stop(struct cpdma_chan *chan)
803{
804 struct cpdma_ctlr *ctlr = chan->ctlr;
805 struct cpdma_desc_pool *pool = ctlr->pool;
806 unsigned long flags;
807 int ret;
808 unsigned long timeout;
809
810 spin_lock_irqsave(&chan->lock, flags);
811 if (chan->state != CPDMA_STATE_ACTIVE) {
812 spin_unlock_irqrestore(&chan->lock, flags);
813 return -EINVAL;
814 }
815
816 chan->state = CPDMA_STATE_TEARDOWN;
817 dma_reg_write(ctlr, chan->int_clear, chan->mask);
818
819 /* trigger teardown */
820 dma_reg_write(ctlr, chan->td, chan->chan_num);
821
822 /* wait for teardown complete */
823 timeout = jiffies + HZ/10; /* 100 msec */
824 while (time_before(jiffies, timeout)) {
825 u32 cp = chan_read(chan, cp);
826 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
827 break;
828 cpu_relax();
829 }
830 WARN_ON(!time_before(jiffies, timeout));
831 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
832
833 /* handle completed packets */
834 do {
835 ret = __cpdma_chan_process(chan);
836 if (ret < 0)
837 break;
838 } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
839
840 /* remaining packets haven't been tx/rx'ed, clean them up */
841 while (chan->head) {
842 struct cpdma_desc __iomem *desc = chan->head;
843 dma_addr_t next_dma;
844
845 next_dma = desc_read(desc, hw_next);
846 chan->head = desc_from_phys(pool, next_dma);
847 chan->stats.teardown_dequeue++;
848
849 /* issue callback without locks held */
850 spin_unlock_irqrestore(&chan->lock, flags);
851 __cpdma_chan_free(chan, desc, 0, -ENOSYS);
852 spin_lock_irqsave(&chan->lock, flags);
853 }
854
855 chan->state = CPDMA_STATE_IDLE;
856 spin_unlock_irqrestore(&chan->lock, flags);
857 return 0;
858}
859
860int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
861{
862 unsigned long flags;
863
864 spin_lock_irqsave(&chan->lock, flags);
865 if (chan->state != CPDMA_STATE_ACTIVE) {
866 spin_unlock_irqrestore(&chan->lock, flags);
867 return -EINVAL;
868 }
869
870 dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
871 chan->mask);
872 spin_unlock_irqrestore(&chan->lock, flags);
873
874 return 0;
875}
876
877struct cpdma_control_info {
878 u32 reg;
879 u32 shift, mask;
880 int access;
881#define ACCESS_RO BIT(0)
882#define ACCESS_WO BIT(1)
883#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
884};
885
886struct cpdma_control_info controls[] = {
887 [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
888 [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
889 [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
890 [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
891 [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
892 [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
893 [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
894 [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
895 [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
896 [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
897 [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
898};
899
900int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
901{
902 unsigned long flags;
903 struct cpdma_control_info *info = &controls[control];
904 int ret;
905
906 spin_lock_irqsave(&ctlr->lock, flags);
907
908 ret = -ENOTSUPP;
909 if (!ctlr->params.has_ext_regs)
910 goto unlock_ret;
911
912 ret = -EINVAL;
913 if (ctlr->state != CPDMA_STATE_ACTIVE)
914 goto unlock_ret;
915
916 ret = -ENOENT;
917 if (control < 0 || control >= ARRAY_SIZE(controls))
918 goto unlock_ret;
919
920 ret = -EPERM;
921 if ((info->access & ACCESS_RO) != ACCESS_RO)
922 goto unlock_ret;
923
924 ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
925
926unlock_ret:
927 spin_unlock_irqrestore(&ctlr->lock, flags);
928 return ret;
929}
930
931int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
932{
933 unsigned long flags;
934 struct cpdma_control_info *info = &controls[control];
935 int ret;
936 u32 val;
937
938 spin_lock_irqsave(&ctlr->lock, flags);
939
940 ret = -ENOTSUPP;
941 if (!ctlr->params.has_ext_regs)
942 goto unlock_ret;
943
944 ret = -EINVAL;
945 if (ctlr->state != CPDMA_STATE_ACTIVE)
946 goto unlock_ret;
947
948 ret = -ENOENT;
949 if (control < 0 || control >= ARRAY_SIZE(controls))
950 goto unlock_ret;
951
952 ret = -EPERM;
953 if ((info->access & ACCESS_WO) != ACCESS_WO)
954 goto unlock_ret;
955
956 val = dma_reg_read(ctlr, info->reg);
957 val &= ~(info->mask << info->shift);
958 val |= (value & info->mask) << info->shift;
959 dma_reg_write(ctlr, info->reg, val);
960 ret = 0;
961
962unlock_ret:
963 spin_unlock_irqrestore(&ctlr->lock, flags);
964 return ret;
965}
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
new file mode 100644
index 000000000000..868e50ebde45
--- /dev/null
+++ b/drivers/net/davinci_cpdma.h
@@ -0,0 +1,108 @@
1/*
2 * Texas Instruments CPDMA Driver
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#ifndef __DAVINCI_CPDMA_H__
16#define __DAVINCI_CPDMA_H__
17
18#define CPDMA_MAX_CHANNELS BITS_PER_LONG
19
20#define tx_chan_num(chan) (chan)
21#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
22#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
23#define is_tx_chan(chan) (!is_rx_chan(chan))
24#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
25#define chan_linear(chan) __chan_linear((chan)->chan_num)
26
27struct cpdma_params {
28 struct device *dev;
29 void __iomem *dmaregs;
30 void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
31 void __iomem *rxthresh, *rxfree;
32 int num_chan;
33 bool has_soft_reset;
34 int min_packet_size;
35 u32 desc_mem_phys;
36 int desc_mem_size;
37 int desc_align;
38
39 /*
40 * Some instances of embedded cpdma controllers have extra control and
41 * status registers. The following flag enables access to these
42 * "extended" registers.
43 */
44 bool has_ext_regs;
45};
46
47struct cpdma_chan_stats {
48 u32 head_enqueue;
49 u32 tail_enqueue;
50 u32 pad_enqueue;
51 u32 misqueued;
52 u32 desc_alloc_fail;
53 u32 pad_alloc_fail;
54 u32 runt_receive_buff;
55 u32 runt_transmit_buff;
56 u32 empty_dequeue;
57 u32 busy_dequeue;
58 u32 good_dequeue;
59 u32 requeue;
60 u32 teardown_dequeue;
61};
62
63struct cpdma_ctlr;
64struct cpdma_chan;
65
66typedef void (*cpdma_handler_fn)(void *token, int len, int status);
67
68struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
69int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
70int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
71int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
72int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
73
74struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
75 cpdma_handler_fn handler);
76int cpdma_chan_destroy(struct cpdma_chan *chan);
77int cpdma_chan_start(struct cpdma_chan *chan);
78int cpdma_chan_stop(struct cpdma_chan *chan);
79int cpdma_chan_dump(struct cpdma_chan *chan);
80
81int cpdma_chan_get_stats(struct cpdma_chan *chan,
82 struct cpdma_chan_stats *stats);
83int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
84 int len, gfp_t gfp_mask);
85int cpdma_chan_process(struct cpdma_chan *chan, int quota);
86
87int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
88void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
89int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
90
91enum cpdma_control {
92 CPDMA_CMD_IDLE, /* write-only */
93 CPDMA_COPY_ERROR_FRAMES, /* read-write */
94 CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
95 CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
96 CPDMA_TX_PRIO_FIXED, /* read-write */
97 CPDMA_STAT_IDLE, /* read-only */
98 CPDMA_STAT_TX_ERR_CHAN, /* read-only */
99 CPDMA_STAT_TX_ERR_CODE, /* read-only */
100 CPDMA_STAT_RX_ERR_CHAN, /* read-only */
101 CPDMA_STAT_RX_ERR_CODE, /* read-only */
102 CPDMA_RX_BUFFER_OFFSET, /* read-write */
103};
104
105int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
106int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
107
108#endif
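The header above is the entire contract between the CPDMA engine and its clients (davinci_emac below, and eventually the cpsw switches mentioned in davinci_cpdma.c): fill in a cpdma_params describing the register blocks and descriptor memory, create a controller and per-direction channels with completion handlers, then submit buffers and drain completions from NAPI. A minimal, hypothetical TX-side sketch of that flow follows; the register offsets, the my_* names and the fixed sizes are illustrative assumptions, not part of this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "davinci_cpdma.h"

/* Completion handler: invoked by cpdma_chan_process() for each finished
 * descriptor with the token that was passed to cpdma_chan_submit(). */
static void my_tx_handler(void *token, int len, int status)
{
	dev_kfree_skb_any((struct sk_buff *)token);
}

static struct cpdma_ctlr *my_dma_setup(struct device *dev, void __iomem *regs)
{
	struct cpdma_chan *txch;
	struct cpdma_ctlr *ctlr;
	struct cpdma_params params = {
		.dev		 = dev,
		.dmaregs	 = regs,
		.txhdp		 = regs + 0x600,	/* illustrative offsets */
		.rxhdp		 = regs + 0x620,
		.txcp		 = regs + 0x640,
		.rxcp		 = regs + 0x660,
		.num_chan	 = 8,
		.has_soft_reset	 = true,
		.min_packet_size = 60,
		.desc_mem_size	 = 8192,	/* desc_mem_phys == 0: pool uses dma_alloc_coherent() */
		.desc_align	 = 16,
	};

	ctlr = cpdma_ctlr_create(&params);
	if (!ctlr)
		return ERR_PTR(-ENOMEM);

	txch = cpdma_chan_create(ctlr, tx_chan_num(0), my_tx_handler);
	if (IS_ERR(txch)) {
		cpdma_ctlr_destroy(ctlr);
		return ERR_CAST(txch);
	}

	cpdma_ctlr_start(ctlr);	/* soft reset, enable TX/RX, start channels */
	return ctlr;
}

With that in place, ndo_start_xmit() reduces to cpdma_chan_submit(txch, skb, skb->data, skb->len, GFP_KERNEL), and the NAPI poll handler calls cpdma_chan_process(txch, budget) followed by cpdma_ctlr_eoi(ctlr) to acknowledge the interrupt.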
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7fbd052ddb0a..2a628d17d178 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
 #include <asm/irq.h>
 #include <asm/page.h>
 
+#include "davinci_cpdma.h"
+
 static int debug_level;
 module_param(debug_level, int, 0);
 MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DEF_MAX_FRAME_SIZE		(1500 + 14 + 4 + 4)
 #define EMAC_DEF_TX_CH			(0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH			(0) /* Default 0th channel */
-#define EMAC_DEF_MDIO_TICK_MS		(10) /* typically 1 tick=1 ms) */
+#define EMAC_DEF_RX_NUM_DESC		(128)
 #define EMAC_DEF_MAX_TX_CH		(1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH		(1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT		(64) /* Default NAPI poll weight */
@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 /* EMAC register related defines */
 #define EMAC_ALL_MULTI_REG_VALUE	(0xFFFFFFFF)
 #define EMAC_NUM_MULTICAST_BITS		(64)
-#define EMAC_TEARDOWN_VALUE		(0xFFFFFFFC)
 #define EMAC_TX_CONTROL_TX_ENABLE_VAL	(0x1)
 #define EMAC_RX_CONTROL_RX_ENABLE_VAL	(0x1)
 #define EMAC_MAC_HOST_ERR_INTMASK_VAL	(0x2)
@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DEF_MAX_MULTICAST_ADDRESSES	(64) /* Max mcast addr's */
 
 /* EMAC Peripheral Device Register Memory Layout structure */
-#define EMAC_TXIDVER		0x0
-#define EMAC_TXCONTROL		0x4
-#define EMAC_TXTEARDOWN		0x8
-#define EMAC_RXIDVER		0x10
-#define EMAC_RXCONTROL		0x14
-#define EMAC_RXTEARDOWN		0x18
-#define EMAC_TXINTSTATRAW	0x80
-#define EMAC_TXINTSTATMASKED	0x84
-#define EMAC_TXINTMASKSET	0x88
-#define EMAC_TXINTMASKCLEAR	0x8C
 #define EMAC_MACINVECTOR	0x90
 
 #define EMAC_DM646X_MACEOIVECTOR	0x94
 
-#define EMAC_RXINTSTATRAW	0xA0
-#define EMAC_RXINTSTATMASKED	0xA4
-#define EMAC_RXINTMASKSET	0xA8
-#define EMAC_RXINTMASKCLEAR	0xAC
 #define EMAC_MACINTSTATRAW	0xB0
 #define EMAC_MACINTSTATMASKED	0xB4
 #define EMAC_MACINTMASKSET	0xB8
@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_MACADDRHI		0x504
 #define EMAC_MACINDEX		0x508
 
-/* EMAC HDP and Completion registors */
-#define EMAC_TXHDP(ch)		(0x600 + (ch * 4))
-#define EMAC_RXHDP(ch)		(0x620 + (ch * 4))
-#define EMAC_TXCP(ch)		(0x640 + (ch * 4))
-#define EMAC_RXCP(ch)		(0x660 + (ch * 4))
-
 /* EMAC statistics registers */
 #define EMAC_RXGOODFRAMES	0x200
 #define EMAC_RXBCASTFRAMES	0x204
@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DM644X_INTMIN_INTVL	0x1
 #define EMAC_DM644X_INTMAX_INTVL	(EMAC_DM644X_EWINTCNT_MASK)
 
-/* EMAC MDIO related */
-/* Mask & Control defines */
-#define MDIO_CONTROL_CLKDIV	(0xFF)
-#define MDIO_CONTROL_ENABLE	BIT(30)
-#define MDIO_USERACCESS_GO	BIT(31)
-#define MDIO_USERACCESS_WRITE	BIT(30)
-#define MDIO_USERACCESS_READ	(0)
-#define MDIO_USERACCESS_REGADR	(0x1F << 21)
-#define MDIO_USERACCESS_PHYADR	(0x1F << 16)
-#define MDIO_USERACCESS_DATA	(0xFFFF)
-#define MDIO_USERPHYSEL_LINKSEL	BIT(7)
-#define MDIO_VER_MODID		(0xFFFF << 16)
-#define MDIO_VER_REVMAJ		(0xFF << 8)
-#define MDIO_VER_REVMIN		(0xFF)
-
-#define MDIO_USERACCESS(inst)	(0x80 + (inst * 8))
-#define MDIO_USERPHYSEL(inst)	(0x84 + (inst * 8))
-#define MDIO_CONTROL		(0x04)
-
 /* EMAC DM646X control module registers */
 #define EMAC_DM646X_CMINTCTRL	0x0C
 #define EMAC_DM646X_CMRXINTEN	0x14
@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
345/* EMAC Stats Clear Mask */ 307/* EMAC Stats Clear Mask */
346#define EMAC_STATS_CLR_MASK (0xFFFFFFFF) 308#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
347 309
348/** net_buf_obj: EMAC network buffer data structure
349 *
350 * EMAC network buffer data structure
351 */
352struct emac_netbufobj {
353 void *buf_token;
354 char *data_ptr;
355 int length;
356};
357
358/** net_pkt_obj: EMAC network packet data structure
359 *
360 * EMAC network packet data structure - supports buffer list (for future)
361 */
362struct emac_netpktobj {
363 void *pkt_token; /* data token may hold tx/rx chan id */
364 struct emac_netbufobj *buf_list; /* array of network buffer objects */
365 int num_bufs;
366 int pkt_length;
367};
368
369/** emac_tx_bd: EMAC TX Buffer descriptor data structure
370 *
371 * EMAC TX Buffer descriptor data structure
372 */
373struct emac_tx_bd {
374 int h_next;
375 int buff_ptr;
376 int off_b_len;
377 int mode; /* SOP, EOP, ownership, EOQ, teardown, Qstarv, length */
378 struct emac_tx_bd __iomem *next;
379 void *buf_token;
380};
381
382/** emac_txch: EMAC TX Channel data structure
383 *
384 * EMAC TX Channel data structure
385 */
386struct emac_txch {
387 /* Config related */
388 u32 num_bd;
389 u32 service_max;
390
391 /* CPPI specific */
392 u32 alloc_size;
393 void __iomem *bd_mem;
394 struct emac_tx_bd __iomem *bd_pool_head;
395 struct emac_tx_bd __iomem *active_queue_head;
396 struct emac_tx_bd __iomem *active_queue_tail;
397 struct emac_tx_bd __iomem *last_hw_bdprocessed;
398 u32 queue_active;
399 u32 teardown_pending;
400 u32 *tx_complete;
401
402 /** statistics */
403 u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
404 u32 mis_queued_packets;
405 u32 queue_reinit;
406 u32 end_of_queue_add;
407 u32 out_of_tx_bd;
408 u32 no_active_pkts; /* IRQ when there were no packets to process */
409 u32 active_queue_count;
410};
411
412/** emac_rx_bd: EMAC RX Buffer descriptor data structure
413 *
414 * EMAC RX Buffer descriptor data structure
415 */
416struct emac_rx_bd {
417 int h_next;
418 int buff_ptr;
419 int off_b_len;
420 int mode;
421 struct emac_rx_bd __iomem *next;
422 void *data_ptr;
423 void *buf_token;
424};
425
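In both of the descriptor structures removed here, only the first four words (h_next, buff_ptr, off_b_len, mode) are visible to the CPPI DMA engine; the trailing pointers (next, data_ptr, buf_token) are driver bookkeeping that never reaches hardware. A hedged sketch of how the old TX path filled the hardware words for a single-fragment packet, using the flag macros that appear in the removed emac_send() below (their bit positions are defined elsewhere in this driver):

/* Sketch only; mirrors the removed emac_send() further down. */
static void emac_fill_tx_bd(struct emac_tx_bd __iomem *bd,
                            dma_addr_t buf, int len, void *skb)
{
        bd->h_next    = 0;               /* single BD, no hardware chain   */
        bd->buff_ptr  = buf;             /* DMA address of the packet data */
        bd->off_b_len = len;             /* buffer offset/length word      */
        bd->mode      = EMAC_CPPI_SOP_BIT | EMAC_CPPI_EOP_BIT |
                        EMAC_CPPI_OWNERSHIP_BIT | len;
        bd->next      = NULL;            /* driver-only: free/active lists */
        bd->buf_token = skb;             /* driver-only: skb to free later */
}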
426/** emac_rxch: EMAC RX Channel data structure
427 *
428 * EMAC RX Channel data structure
429 */
430struct emac_rxch {
431 /* configuration info */
432 u32 num_bd;
433 u32 service_max;
434 u32 buf_size;
435 char mac_addr[6];
436
437 /** CPPI specific */
438 u32 alloc_size;
439 void __iomem *bd_mem;
440 struct emac_rx_bd __iomem *bd_pool_head;
441 struct emac_rx_bd __iomem *active_queue_head;
442 struct emac_rx_bd __iomem *active_queue_tail;
443 u32 queue_active;
444 u32 teardown_pending;
445
446 /* packet and buffer objects */
447 struct emac_netpktobj pkt_queue;
448 struct emac_netbufobj buf_queue;
449
450 /** statistics */
451 u32 proc_count; /* number of times emac_rx_bdproc is called */
452 u32 processed_bd;
453 u32 recycled_bd;
454 u32 out_of_rx_bd;
455 u32 out_of_rx_buffers;
456 u32 queue_reinit;
457 u32 end_of_queue_add;
458 u32 end_of_queue;
459 u32 mis_queued_packets;
460};
461
462/* emac_priv: EMAC private data structure 310/* emac_priv: EMAC private data structure
463 * 311 *
464 * EMAC adapter private data structure 312 * EMAC adapter private data structure
@@ -469,17 +317,13 @@ struct emac_priv {
469 struct platform_device *pdev; 317 struct platform_device *pdev;
470 struct napi_struct napi; 318 struct napi_struct napi;
471 char mac_addr[6]; 319 char mac_addr[6];
472 spinlock_t tx_lock;
473 spinlock_t rx_lock;
474 void __iomem *remap_addr; 320 void __iomem *remap_addr;
475 u32 emac_base_phys; 321 u32 emac_base_phys;
476 void __iomem *emac_base; 322 void __iomem *emac_base;
477 void __iomem *ctrl_base; 323 void __iomem *ctrl_base;
478 void __iomem *emac_ctrl_ram; 324 struct cpdma_ctlr *dma;
479 u32 ctrl_ram_size; 325 struct cpdma_chan *txchan;
480 u32 hw_ram_addr; 326 struct cpdma_chan *rxchan;
481 struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
482 struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
483 u32 link; /* 1=link on, 0=link off */ 327 u32 link; /* 1=link on, 0=link off */
484 u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */ 328 u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
485 u32 duplex; /* Link duplex: 0=Half, 1=Full */ 329 u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -493,13 +337,7 @@ struct emac_priv {
493 u32 mac_hash2; 337 u32 mac_hash2;
494 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; 338 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
495 u32 rx_addr_type; 339 u32 rx_addr_type;
496 /* periodic timer required for MDIO polling */ 340 const char *phy_id;
497 struct timer_list periodic_timer;
498 u32 periodic_ticks;
499 u32 timer_active;
500 u32 phy_mask;
501 /* mii_bus,phy members */
502 struct mii_bus *mii_bus;
503 struct phy_device *phydev; 341 struct phy_device *phydev;
504 spinlock_t lock; 342 spinlock_t lock;
505 /*platform specific members*/ 343 /*platform specific members*/
@@ -510,19 +348,6 @@ struct emac_priv {
510/* clock frequency for EMAC */ 348/* clock frequency for EMAC */
511static struct clk *emac_clk; 349static struct clk *emac_clk;
512static unsigned long emac_bus_frequency; 350static unsigned long emac_bus_frequency;
513static unsigned long mdio_max_freq;
514
515#define emac_virt_to_phys(addr, priv) \
516 (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
517 + priv->hw_ram_addr)
518
519/* Cache macros - Packet buffers would be from skb pool which is cached */
520#define EMAC_VIRT_NOCACHE(addr) (addr)
521
522/* DM644x does not have BD's in cached memory - so no cache functions */
523#define BD_CACHE_INVALIDATE(addr, size)
524#define BD_CACHE_WRITEBACK(addr, size)
525#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
526 351
527/* EMAC TX Host Error description strings */ 352/* EMAC TX Host Error description strings */
528static char *emac_txhost_errcodes[16] = { 353static char *emac_txhost_errcodes[16] = {
@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
548#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg))) 373#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
549#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) 374#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
550 375
551#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
552#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
553
554/** 376/**
555 * emac_dump_regs: Dump important EMAC registers to debug terminal 377 * emac_dump_regs: Dump important EMAC registers to debug terminal
556 * @priv: The DaVinci EMAC private adapter structure 378 * @priv: The DaVinci EMAC private adapter structure
@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
569 emac_ctrl_read(EMAC_CTRL_EWCTL), 391 emac_ctrl_read(EMAC_CTRL_EWCTL),
570 emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); 392 emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
571 } 393 }
572 dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
573 emac_read(EMAC_TXIDVER),
574 ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
575 emac_read(EMAC_RXIDVER),
576 ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
577 dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
578 "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
579 emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
580 dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
581 "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
582 emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
583 dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
584 "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
585 emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
586 dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", 394 dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
587 emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); 395 emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
588 dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ 396 dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
591 dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ 399 dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
592 "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), 400 "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
593 emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); 401 emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
594 dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
595 emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
596 dev_info(emac_dev, "EMAC Statistics\n"); 402 dev_info(emac_dev, "EMAC Statistics\n");
597 dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", 403 dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
598 emac_read(EMAC_RXGOODFRAMES)); 404 emac_read(EMAC_RXGOODFRAMES));
@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
654 emac_read(EMAC_RXMOFOVERRUNS)); 460 emac_read(EMAC_RXMOFOVERRUNS));
655 dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", 461 dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
656 emac_read(EMAC_RXDMAOVERRUNS)); 462 emac_read(EMAC_RXDMAOVERRUNS));
463
464 cpdma_ctlr_dump(priv->dma);
657} 465}
658 466
659/*************************************************************************
660 * EMAC MDIO/Phy Functionality
661 *************************************************************************/
662/** 467/**
663 * emac_get_drvinfo: Get EMAC driver information 468 * emac_get_drvinfo: Get EMAC driver information
664 * @ndev: The DaVinci EMAC network adapter 469 * @ndev: The DaVinci EMAC network adapter
@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
686 struct ethtool_cmd *ecmd) 491 struct ethtool_cmd *ecmd)
687{ 492{
688 struct emac_priv *priv = netdev_priv(ndev); 493 struct emac_priv *priv = netdev_priv(ndev);
689 if (priv->phy_mask) 494 if (priv->phydev)
690 return phy_ethtool_gset(priv->phydev, ecmd); 495 return phy_ethtool_gset(priv->phydev, ecmd);
691 else 496 else
692 return -EOPNOTSUPP; 497 return -EOPNOTSUPP;
@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
704static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 509static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
705{ 510{
706 struct emac_priv *priv = netdev_priv(ndev); 511 struct emac_priv *priv = netdev_priv(ndev);
707 if (priv->phy_mask) 512 if (priv->phydev)
708 return phy_ethtool_sset(priv->phydev, ecmd); 513 return phy_ethtool_sset(priv->phydev, ecmd);
709 else 514 else
710 return -EOPNOTSUPP; 515 return -EOPNOTSUPP;
@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
841 mac_control = emac_read(EMAC_MACCONTROL); 646 mac_control = emac_read(EMAC_MACCONTROL);
842 cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? 647 cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
843 DUPLEX_FULL : DUPLEX_HALF; 648 DUPLEX_FULL : DUPLEX_HALF;
844 if (priv->phy_mask) 649 if (priv->phydev)
845 new_duplex = priv->phydev->duplex; 650 new_duplex = priv->phydev->duplex;
846 else 651 else
847 new_duplex = DUPLEX_FULL; 652 new_duplex = DUPLEX_FULL;
@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
1184 return IRQ_HANDLED; 989 return IRQ_HANDLED;
1185} 990}
1186 991
1187/** EMAC on-chip buffer descriptor memory 992static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
1188 *
1189 * WARNING: Please note that the on chip memory is used for both TX and RX
1190 * buffer descriptor queues and is equally divided between TX and RX desc's
1191 * If the number of TX or RX descriptors changes, these memory pointers need
1192 * to be adjusted. If external memory is allocated then these pointers can
1193 * point to the memory
1194 *
1195 */
1196#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
1197#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
1198 (((priv)->ctrl_ram_size) >> 1))
1199
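The removed macros split the on-chip control-module RAM in half, TX buffer descriptors in the lower half and RX in the upper half, with emac_virt_to_phys() translating driver addresses into the bus addresses the hardware expects. With this patch the region is simply described to CPDMA, which allocates and translates descriptors for both directions itself. A short sketch, assuming the variable and field names used in the probe hunk below:

static void emac_describe_desc_ram(struct cpdma_params *dma_params,
                                   struct emac_platform_data *pdata,
                                   unsigned long hw_ram_addr)
{
        /* no more TX-half/RX-half split; CPDMA owns the whole region */
        dma_params->desc_mem_phys = hw_ram_addr;          /* bus address of ctrl RAM */
        dma_params->desc_mem_size = pdata->ctrl_ram_size; /* full size, undivided    */
        dma_params->desc_align    = 16;  /* keeps the old 16-byte BD alignment */
}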
1200/**
1201 * emac_init_txch: TX channel initialization
1202 * @priv: The DaVinci EMAC private adapter structure
1203 * @ch: TX channel number
1204 *
1205 * Called during device init to setup a TX channel (allocate buffer desc,
1206 * create free pool) and keep ready for transmission
1207 *
1208 * Returns success(0) or mem alloc failures error code
1209 */
1210static int emac_init_txch(struct emac_priv *priv, u32 ch)
1211{
1212 struct device *emac_dev = &priv->ndev->dev;
1213 u32 cnt, bd_size;
1214 void __iomem *mem;
1215 struct emac_tx_bd __iomem *curr_bd;
1216 struct emac_txch *txch = NULL;
1217
1218 txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
1219 if (NULL == txch) {
1220 dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
1221 return -ENOMEM;
1222 }
1223 priv->txch[ch] = txch;
1224 txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
1225 txch->active_queue_head = NULL;
1226 txch->active_queue_tail = NULL;
1227 txch->queue_active = 0;
1228 txch->teardown_pending = 0;
1229
1230 /* allocate memory for TX CPPI channel on a 4 byte boundary */
1231 txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
1232 GFP_KERNEL);
1233 if (NULL == txch->tx_complete) {
1234 dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
1235 kfree(txch);
1236 return -ENOMEM;
1237 }
1238
1239 /* allocate buffer descriptor pool; align every BD on a four word
1240 * boundary for future requirements */
1241 bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
1242 txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
1243 txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
1244
1245 /* alloc TX BD memory */
1246 txch->bd_mem = EMAC_TX_BD_MEM(priv);
1247 __memzero((void __force *)txch->bd_mem, txch->alloc_size);
1248
1249 /* initialize the BD linked list */
1250 mem = (void __force __iomem *)
1251 (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
1252 txch->bd_pool_head = NULL;
1253 for (cnt = 0; cnt < txch->num_bd; cnt++) {
1254 curr_bd = mem + (cnt * bd_size);
1255 curr_bd->next = txch->bd_pool_head;
1256 txch->bd_pool_head = curr_bd;
1257 }
1258
1259 /* reset statistics counters */
1260 txch->out_of_tx_bd = 0;
1261 txch->no_active_pkts = 0;
1262 txch->active_queue_count = 0;
1263
1264 return 0;
1265}
1266
1267/**
1268 * emac_cleanup_txch: Book-keep function to clean TX channel resources
1269 * @priv: The DaVinci EMAC private adapter structure
1270 * @ch: TX channel number
1271 *
1272 * Called to clean up TX channel resources
1273 *
1274 */
1275static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
1276{ 993{
1277 struct emac_txch *txch = priv->txch[ch]; 994 struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
1278 995 if (WARN_ON(!skb))
1279 if (txch) { 996 return NULL;
1280 if (txch->bd_mem) 997 skb->dev = priv->ndev;
1281 txch->bd_mem = NULL; 998 skb_reserve(skb, NET_IP_ALIGN);
1282 kfree(txch->tx_complete); 999 return skb;
1283 kfree(txch);
1284 priv->txch[ch] = NULL;
1285 }
1286} 1000}
1287 1001
1288/** 1002static void emac_rx_handler(void *token, int len, int status)
1289 * emac_net_tx_complete: TX packet completion function
1290 * @priv: The DaVinci EMAC private adapter structure
1291 * @net_data_tokens: packet token - skb pointer
1292 * @num_tokens: number of skb's to free
1293 * @ch: TX channel number
1294 *
1295 * Frees the skb once packet is transmitted
1296 *
1297 */
1298static int emac_net_tx_complete(struct emac_priv *priv,
1299 void **net_data_tokens,
1300 int num_tokens, u32 ch)
1301{ 1003{
1302 struct net_device *ndev = priv->ndev; 1004 struct sk_buff *skb = token;
1303 u32 cnt; 1005 struct net_device *ndev = skb->dev;
1304 1006 struct emac_priv *priv = netdev_priv(ndev);
1305 if (unlikely(num_tokens && netif_queue_stopped(ndev))) 1007 struct device *emac_dev = &ndev->dev;
1306 netif_start_queue(ndev); 1008 int ret;
1307 for (cnt = 0; cnt < num_tokens; cnt++) { 1009
1308 struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt]; 1010 /* free and bail if we are shutting down */
1309 if (skb == NULL) 1011 if (unlikely(!netif_running(ndev))) {
1310 continue;
1311 ndev->stats.tx_packets++;
1312 ndev->stats.tx_bytes += skb->len;
1313 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return;
1314 } 1014 }
1315 return 0;
1316}
1317
1318/**
1319 * emac_txch_teardown: TX channel teardown
1320 * @priv: The DaVinci EMAC private adapter structure
1321 * @ch: TX channel number
1322 *
1323 * Called to teardown TX channel
1324 *
1325 */
1326static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
1327{
1328 struct device *emac_dev = &priv->ndev->dev;
1329 u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
1330 struct emac_txch *txch = priv->txch[ch];
1331 struct emac_tx_bd __iomem *curr_bd;
1332
1333 while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
1334 EMAC_TEARDOWN_VALUE) {
1335 /* wait till tx teardown complete */
1336 cpu_relax(); /* TODO: check if this helps ... */
1337 --teardown_cnt;
1338 if (0 == teardown_cnt) {
1339 dev_err(emac_dev, "EMAC: TX teardown aborted\n");
1340 break;
1341 }
1342 }
1343 emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
1344
1345 /* process sent packets and return skb's to upper layer */
1346 if (1 == txch->queue_active) {
1347 curr_bd = txch->active_queue_head;
1348 while (curr_bd != NULL) {
1349 dma_unmap_single(emac_dev, curr_bd->buff_ptr,
1350 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
1351 DMA_TO_DEVICE);
1352
1353 emac_net_tx_complete(priv, (void __force *)
1354 &curr_bd->buf_token, 1, ch);
1355 if (curr_bd != txch->active_queue_tail)
1356 curr_bd = curr_bd->next;
1357 else
1358 break;
1359 }
1360 txch->bd_pool_head = txch->active_queue_head;
1361 txch->active_queue_head =
1362 txch->active_queue_tail = NULL;
1363 }
1364}
1365 1015
1366/** 1016 /* recycle on receive error */
1367 * emac_stop_txch: Stop TX channel operation 1017 if (status < 0) {
1368 * @priv: The DaVinci EMAC private adapter structure 1018 ndev->stats.rx_errors++;
1369 * @ch: TX channel number 1019 goto recycle;
1370 *
1371 * Called to stop TX channel operation
1372 *
1373 */
1374static void emac_stop_txch(struct emac_priv *priv, u32 ch)
1375{
1376 struct emac_txch *txch = priv->txch[ch];
1377
1378 if (txch) {
1379 txch->teardown_pending = 1;
1380 emac_write(EMAC_TXTEARDOWN, 0);
1381 emac_txch_teardown(priv, ch);
1382 txch->teardown_pending = 0;
1383 emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
1384 } 1020 }
1385}
1386 1021
1387/** 1022 /* feed received packet up the stack */
1388 * emac_tx_bdproc: TX buffer descriptor (packet) processing 1023 skb_put(skb, len);
1389 * @priv: The DaVinci EMAC private adapter structure 1024 skb->protocol = eth_type_trans(skb, ndev);
1390 * @ch: TX channel number to process buffer descriptors for 1025 netif_receive_skb(skb);
1391 * @budget: number of packets allowed to process 1026 ndev->stats.rx_bytes += len;
1392 * @pending: indication to caller that packets are pending to process 1027 ndev->stats.rx_packets++;
1393 *
1394 * Processes TX buffer descriptors after packets are transmitted - checks
1395 * ownership bit on the TX descriptor and requeues it to free pool & frees
1396 * the SKB buffer. Only "budget" number of packets are processed and
1397 * indication of pending packets provided to the caller
1398 *
1399 * Returns number of packets processed
1400 */
1401static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
1402{
1403 struct device *emac_dev = &priv->ndev->dev;
1404 unsigned long flags;
1405 u32 frame_status;
1406 u32 pkts_processed = 0;
1407 u32 tx_complete_cnt = 0;
1408 struct emac_tx_bd __iomem *curr_bd;
1409 struct emac_txch *txch = priv->txch[ch];
1410 u32 *tx_complete_ptr = txch->tx_complete;
1411
1412 if (unlikely(1 == txch->teardown_pending)) {
1413 if (netif_msg_tx_err(priv) && net_ratelimit()) {
1414 dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
1415 "teardown pending\n");
1416 }
1417 return 0; /* dont handle any pkt completions */
1418 }
1419 1028
1420 ++txch->proc_count; 1029 /* alloc a new packet for receive */
1421 spin_lock_irqsave(&priv->tx_lock, flags); 1030 skb = emac_rx_alloc(priv);
1422 curr_bd = txch->active_queue_head; 1031 if (!skb) {
1423 if (NULL == curr_bd) { 1032 if (netif_msg_rx_err(priv) && net_ratelimit())
1424 emac_write(EMAC_TXCP(ch), 1033 dev_err(emac_dev, "failed rx buffer alloc\n");
1425 emac_virt_to_phys(txch->last_hw_bdprocessed, priv)); 1034 return;
1426 txch->no_active_pkts++;
1427 spin_unlock_irqrestore(&priv->tx_lock, flags);
1428 return 0;
1429 } 1035 }
1430 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1431 frame_status = curr_bd->mode;
1432 while ((curr_bd) &&
1433 ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
1434 (pkts_processed < budget)) {
1435 emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
1436 txch->active_queue_head = curr_bd->next;
1437 if (frame_status & EMAC_CPPI_EOQ_BIT) {
1438 if (curr_bd->next) { /* misqueued packet */
1439 emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
1440 ++txch->mis_queued_packets;
1441 } else {
1442 txch->queue_active = 0; /* end of queue */
1443 }
1444 }
1445 1036
1446 dma_unmap_single(emac_dev, curr_bd->buff_ptr, 1037recycle:
1447 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, 1038 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1448 DMA_TO_DEVICE); 1039 skb_tailroom(skb), GFP_KERNEL);
1449 1040 if (WARN_ON(ret < 0))
1450 *tx_complete_ptr = (u32) curr_bd->buf_token; 1041 dev_kfree_skb_any(skb);
1451 ++tx_complete_ptr;
1452 ++tx_complete_cnt;
1453 curr_bd->next = txch->bd_pool_head;
1454 txch->bd_pool_head = curr_bd;
1455 --txch->active_queue_count;
1456 pkts_processed++;
1457 txch->last_hw_bdprocessed = curr_bd;
1458 curr_bd = txch->active_queue_head;
1459 if (curr_bd) {
1460 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1461 frame_status = curr_bd->mode;
1462 }
1463 } /* end of pkt processing loop */
1464
1465 emac_net_tx_complete(priv,
1466 (void *)&txch->tx_complete[0],
1467 tx_complete_cnt, ch);
1468 spin_unlock_irqrestore(&priv->tx_lock, flags);
1469 return pkts_processed;
1470} 1042}
1471 1043
1472#define EMAC_ERR_TX_OUT_OF_BD -1 1044static void emac_tx_handler(void *token, int len, int status)
1473
1474/**
1475 * emac_send: EMAC Transmit function (internal)
1476 * @priv: The DaVinci EMAC private adapter structure
1477 * @pkt: packet pointer (contains skb ptr)
1478 * @ch: TX channel number
1479 *
1480 * Called by the transmit function to queue the packet in EMAC hardware queue
1481 *
1482 * Returns success(0) or error code (typically out of desc's)
1483 */
1484static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
1485{ 1045{
1486 unsigned long flags; 1046 struct sk_buff *skb = token;
1487 struct emac_tx_bd __iomem *curr_bd; 1047 struct net_device *ndev = skb->dev;
1488 struct emac_txch *txch;
1489 struct emac_netbufobj *buf_list;
1490
1491 txch = priv->txch[ch];
1492 buf_list = pkt->buf_list; /* get handle to the buffer array */
1493
1494 /* check packet size and pad if short */
1495 if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
1496 buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
1497 pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
1498 }
1499 1048
1500 spin_lock_irqsave(&priv->tx_lock, flags); 1049 if (unlikely(netif_queue_stopped(ndev)))
1501 curr_bd = txch->bd_pool_head; 1050 netif_start_queue(ndev);
1502 if (curr_bd == NULL) { 1051 ndev->stats.tx_packets++;
1503 txch->out_of_tx_bd++; 1052 ndev->stats.tx_bytes += len;
1504 spin_unlock_irqrestore(&priv->tx_lock, flags); 1053 dev_kfree_skb_any(skb);
1505 return EMAC_ERR_TX_OUT_OF_BD;
1506 }
1507
1508 txch->bd_pool_head = curr_bd->next;
1509 curr_bd->buf_token = buf_list->buf_token;
1510 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
1511 buf_list->length, DMA_TO_DEVICE);
1512 curr_bd->off_b_len = buf_list->length;
1513 curr_bd->h_next = 0;
1514 curr_bd->next = NULL;
1515 curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
1516 EMAC_CPPI_EOP_BIT | pkt->pkt_length);
1517
1518 /* flush the packet from cache if write back cache is present */
1519 BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1520
1521 /* send the packet */
1522 if (txch->active_queue_head == NULL) {
1523 txch->active_queue_head = curr_bd;
1524 txch->active_queue_tail = curr_bd;
1525 if (1 != txch->queue_active) {
1526 emac_write(EMAC_TXHDP(ch),
1527 emac_virt_to_phys(curr_bd, priv));
1528 txch->queue_active = 1;
1529 }
1530 ++txch->queue_reinit;
1531 } else {
1532 register struct emac_tx_bd __iomem *tail_bd;
1533 register u32 frame_status;
1534
1535 tail_bd = txch->active_queue_tail;
1536 tail_bd->next = curr_bd;
1537 txch->active_queue_tail = curr_bd;
1538 tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
1539 tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
1540 frame_status = tail_bd->mode;
1541 if (frame_status & EMAC_CPPI_EOQ_BIT) {
1542 emac_write(EMAC_TXHDP(ch),
1543 emac_virt_to_phys(curr_bd, priv));
1544 frame_status &= ~(EMAC_CPPI_EOQ_BIT);
1545 tail_bd->mode = frame_status;
1546 ++txch->end_of_queue_add;
1547 }
1548 }
1549 txch->active_queue_count++;
1550 spin_unlock_irqrestore(&priv->tx_lock, flags);
1551 return 0;
1552} 1054}
1553 1055
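Both new handlers above follow the CPDMA completion callback convention: the opaque token passed to cpdma_chan_submit() is returned together with the number of bytes transferred and a status that is negative on error or channel teardown. A minimal skeleton of that contract, assuming the signature used by this series (the real handlers above add queue wake-up, statistics and RX buffer recycling):

static void emac_completion_skeleton(void *token, int len, int status)
{
        struct sk_buff *skb = token;  /* whatever was given to cpdma_chan_submit() */

        if (status < 0) {
                /* transfer failed or the channel is being torn down */
                dev_kfree_skb_any(skb);
                return;
        }

        /* on success, 'len' bytes were actually transferred for this buffer */
        pr_debug("buffer done, %d bytes\n", len);
        dev_kfree_skb_any(skb);
}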
1554/** 1056/**
@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1565{ 1067{
1566 struct device *emac_dev = &ndev->dev; 1068 struct device *emac_dev = &ndev->dev;
1567 int ret_code; 1069 int ret_code;
1568 struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
1569 struct emac_netpktobj tx_packet; /* packet object */
1570 struct emac_priv *priv = netdev_priv(ndev); 1070 struct emac_priv *priv = netdev_priv(ndev);
1571 1071
1572 /* If no link, return */ 1072 /* If no link, return */
1573 if (unlikely(!priv->link)) { 1073 if (unlikely(!priv->link)) {
1574 if (netif_msg_tx_err(priv) && net_ratelimit()) 1074 if (netif_msg_tx_err(priv) && net_ratelimit())
1575 dev_err(emac_dev, "DaVinci EMAC: No link to transmit"); 1075 dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
1576 return NETDEV_TX_BUSY; 1076 goto fail_tx;
1077 }
1078
1079 ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
1080 if (unlikely(ret_code < 0)) {
1081 if (netif_msg_tx_err(priv) && net_ratelimit())
1082 dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
1083 goto fail_tx;
1577 } 1084 }
1578 1085
1579 /* Build the buffer and packet objects - Since only single fragment is 1086 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
1580 * supported, need not set length and token in both packet & object. 1087 GFP_KERNEL);
1581 * Doing so for completeness sake & to show that this needs to be done
1582 * in multifragment case
1583 */
1584 tx_packet.buf_list = &tx_buf;
1585 tx_packet.num_bufs = 1; /* only single fragment supported */
1586 tx_packet.pkt_length = skb->len;
1587 tx_packet.pkt_token = (void *)skb;
1588 tx_buf.length = skb->len;
1589 tx_buf.buf_token = (void *)skb;
1590 tx_buf.data_ptr = skb->data;
1591 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
1592 if (unlikely(ret_code != 0)) { 1088 if (unlikely(ret_code != 0)) {
1593 if (ret_code == EMAC_ERR_TX_OUT_OF_BD) { 1089 if (netif_msg_tx_err(priv) && net_ratelimit())
1594 if (netif_msg_tx_err(priv) && net_ratelimit()) 1090 dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
1595 dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\ 1091 goto fail_tx;
1596 " err. Out of TX BD's");
1597 netif_stop_queue(priv->ndev);
1598 }
1599 ndev->stats.tx_dropped++;
1600 return NETDEV_TX_BUSY;
1601 } 1092 }
1602 1093
1603 return NETDEV_TX_OK; 1094 return NETDEV_TX_OK;
1095
1096fail_tx:
1097 ndev->stats.tx_dropped++;
1098 netif_stop_queue(ndev);
1099 return NETDEV_TX_BUSY;
1604} 1100}
1605 1101
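A consequence of the rewritten transmit path above is how flow control now works: any failure (no link, padding error, or no free descriptor inside CPDMA) takes the fail_tx label, stops the queue and reports NETDEV_TX_BUSY, and the queue is reopened only from emac_tx_handler() when a previously submitted packet completes. A condensed sketch of that stop/wake pairing, with the names used above:

/* Sketch only; both halves appear in full elsewhere in this patch. */
static netdev_tx_t emac_xmit_sketch(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_priv *priv = netdev_priv(ndev);

        if (cpdma_chan_submit(priv->txchan, skb, skb->data,
                              skb->len, GFP_KERNEL) < 0) {
                netif_stop_queue(ndev);  /* restarted from emac_tx_handler() */
                return NETDEV_TX_BUSY;
        }
        return NETDEV_TX_OK;
}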
1606/** 1102/**
@@ -1621,218 +1117,16 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1621 if (netif_msg_tx_err(priv)) 1117 if (netif_msg_tx_err(priv))
1622 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); 1118 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
1623 1119
1120 emac_dump_regs(priv);
1121
1624 ndev->stats.tx_errors++; 1122 ndev->stats.tx_errors++;
1625 emac_int_disable(priv); 1123 emac_int_disable(priv);
1626 emac_stop_txch(priv, EMAC_DEF_TX_CH); 1124 cpdma_chan_stop(priv->txchan);
1627 emac_cleanup_txch(priv, EMAC_DEF_TX_CH); 1125 cpdma_chan_start(priv->txchan);
1628 emac_init_txch(priv, EMAC_DEF_TX_CH);
1629 emac_write(EMAC_TXHDP(0), 0);
1630 emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
1631 emac_int_enable(priv); 1126 emac_int_enable(priv);
1632} 1127}
1633 1128
1634/** 1129/**
1635 * emac_net_alloc_rx_buf: Allocate a skb for RX
1636 * @priv: The DaVinci EMAC private adapter structure
1637 * @buf_size: size of SKB data buffer to allocate
1638 * @data_token: data token returned (skb handle for storing in buffer desc)
1639 * @ch: RX channel number
1640 *
1641 * Called during RX channel setup - allocates skb buffer of required size
1642 * and provides the skb handle and allocated buffer data pointer to caller
1643 *
1644 * Returns skb data pointer or 0 on failure to alloc skb
1645 */
1646static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
1647 void **data_token, u32 ch)
1648{
1649 struct net_device *ndev = priv->ndev;
1650 struct device *emac_dev = &ndev->dev;
1651 struct sk_buff *p_skb;
1652
1653 p_skb = dev_alloc_skb(buf_size);
1654 if (unlikely(NULL == p_skb)) {
1655 if (netif_msg_rx_err(priv) && net_ratelimit())
1656 dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
1657 return NULL;
1658 }
1659
1660 /* set device pointer in skb and reserve space for extra bytes */
1661 p_skb->dev = ndev;
1662 skb_reserve(p_skb, NET_IP_ALIGN);
1663 *data_token = (void *) p_skb;
1664 return p_skb->data;
1665}
1666
1667/**
1668 * emac_init_rxch: RX channel initialization
1669 * @priv: The DaVinci EMAC private adapter structure
1670 * @ch: RX channel number
1671 * @param: mac address for RX channel
1672 *
1673 * Called during device init to setup a RX channel (allocate buffers and
1674 * buffer descriptors, create queue) and keep ready for reception
1675 *
1676 * Returns success(0) or mem alloc failures error code
1677 */
1678static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
1679{
1680 struct device *emac_dev = &priv->ndev->dev;
1681 u32 cnt, bd_size;
1682 void __iomem *mem;
1683 struct emac_rx_bd __iomem *curr_bd;
1684 struct emac_rxch *rxch = NULL;
1685
1686 rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
1687 if (NULL == rxch) {
1688 dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
1689 return -ENOMEM;
1690 }
1691 priv->rxch[ch] = rxch;
1692 rxch->buf_size = priv->rx_buf_size;
1693 rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
1694 rxch->queue_active = 0;
1695 rxch->teardown_pending = 0;
1696
1697 /* save mac address */
1698 for (cnt = 0; cnt < 6; cnt++)
1699 rxch->mac_addr[cnt] = param[cnt];
1700
1701 /* allocate buffer descriptor pool; align every BD on a four word
1702 * boundary for future requirements */
1703 bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
1704 rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
1705 rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
1706 rxch->bd_mem = EMAC_RX_BD_MEM(priv);
1707 __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
1708 rxch->pkt_queue.buf_list = &rxch->buf_queue;
1709
1710 /* allocate RX buffer and initialize the BD linked list */
1711 mem = (void __force __iomem *)
1712 (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
1713 rxch->active_queue_head = NULL;
1714 rxch->active_queue_tail = mem;
1715 for (cnt = 0; cnt < rxch->num_bd; cnt++) {
1716 curr_bd = mem + (cnt * bd_size);
1717 /* for future use the last parameter contains the BD ptr */
1718 curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
1719 rxch->buf_size,
1720 (void __force **)&curr_bd->buf_token,
1721 EMAC_DEF_RX_CH);
1722 if (curr_bd->data_ptr == NULL) {
1723 dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
1724 "failed for ch %d\n", ch);
1725 kfree(rxch);
1726 return -ENOMEM;
1727 }
1728
1729 /* populate the hardware descriptor */
1730 curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
1731 priv);
1732 curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
1733 rxch->buf_size, DMA_FROM_DEVICE);
1734 curr_bd->off_b_len = rxch->buf_size;
1735 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
1736
1737 /* write back to hardware memory */
1738 BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
1739 EMAC_BD_LENGTH_FOR_CACHE);
1740 curr_bd->next = rxch->active_queue_head;
1741 rxch->active_queue_head = curr_bd;
1742 }
1743
1744 /* At this point rxch->active_queue_head points to the first
1745 RX BD ready to be given to RX HDP and rxch->active_queue_tail
1746 points to the last RX BD
1747 */
1748 return 0;
1749}
1750
1751/**
1752 * emac_rxch_teardown: RX channel teardown
1753 * @priv: The DaVinci EMAC private adapter structure
1754 * @ch: RX channel number
1755 *
1756 * Called during device stop to teardown RX channel
1757 *
1758 */
1759static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
1760{
1761 struct device *emac_dev = &priv->ndev->dev;
1762 u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
1763
1764 while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
1765 EMAC_TEARDOWN_VALUE) {
1766 /* wait till rx teardown complete */
1767 cpu_relax(); /* TODO: check if this helps ... */
1768 --teardown_cnt;
1769 if (0 == teardown_cnt) {
1770 dev_err(emac_dev, "EMAC: RX teardown aborted\n");
1771 break;
1772 }
1773 }
1774 emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
1775}
1776
1777/**
1778 * emac_stop_rxch: Stop RX channel operation
1779 * @priv: The DaVinci EMAC private adapter structure
1780 * @ch: RX channel number
1781 *
1782 * Called during device stop to stop RX channel operation
1783 *
1784 */
1785static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
1786{
1787 struct emac_rxch *rxch = priv->rxch[ch];
1788
1789 if (rxch) {
1790 rxch->teardown_pending = 1;
1791 emac_write(EMAC_RXTEARDOWN, ch);
1792 /* wait for teardown complete */
1793 emac_rxch_teardown(priv, ch);
1794 rxch->teardown_pending = 0;
1795 emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
1796 }
1797}
1798
1799/**
1800 * emac_cleanup_rxch: Book-keep function to clean RX channel resources
1801 * @priv: The DaVinci EMAC private adapter structure
1802 * @ch: RX channel number
1803 *
1804 * Called during device stop to clean up RX channel resources
1805 *
1806 */
1807static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
1808{
1809 struct emac_rxch *rxch = priv->rxch[ch];
1810 struct emac_rx_bd __iomem *curr_bd;
1811
1812 if (rxch) {
1813 /* free the receive buffers previously allocated */
1814 curr_bd = rxch->active_queue_head;
1815 while (curr_bd) {
1816 if (curr_bd->buf_token) {
1817 dma_unmap_single(&priv->ndev->dev,
1818 curr_bd->buff_ptr,
1819 curr_bd->off_b_len
1820 & EMAC_RX_BD_BUF_SIZE,
1821 DMA_FROM_DEVICE);
1822
1823 dev_kfree_skb_any((struct sk_buff *)\
1824 curr_bd->buf_token);
1825 }
1826 curr_bd = curr_bd->next;
1827 }
1828 if (rxch->bd_mem)
1829 rxch->bd_mem = NULL;
1830 kfree(rxch);
1831 priv->rxch[ch] = NULL;
1832 }
1833}
1834
1835/**
1836 * emac_set_type0addr: Set EMAC Type0 mac address 1130 * emac_set_type0addr: Set EMAC Type0 mac address
1837 * @priv: The DaVinci EMAC private adapter structure 1131 * @priv: The DaVinci EMAC private adapter structure
1838 * @ch: RX channel number 1132 * @ch: RX channel number
@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
1948static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) 1242static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1949{ 1243{
1950 struct emac_priv *priv = netdev_priv(ndev); 1244 struct emac_priv *priv = netdev_priv(ndev);
1951 struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
1952 struct device *emac_dev = &priv->ndev->dev; 1245 struct device *emac_dev = &priv->ndev->dev;
1953 struct sockaddr *sa = addr; 1246 struct sockaddr *sa = addr;
1954 1247
@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1959 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); 1252 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
1960 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); 1253 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
1961 1254
1962 /* If the interface is down - rxch is NULL. */
1963 /* MAC address is configured only after the interface is enabled. */ 1255 /* MAC address is configured only after the interface is enabled. */
1964 if (netif_running(ndev)) { 1256 if (netif_running(ndev)) {
1965 memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len); 1257 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
1966 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); 1258 emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
1967 } 1259 }
1968 1260
1969 if (netif_msg_drv(priv)) 1261 if (netif_msg_drv(priv))
@@ -1974,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1974} 1266}
1975 1267
1976/** 1268/**
1977 * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
1978 * @priv: The DaVinci EMAC private adapter structure
1979 * @ch: RX channel number to process buffer descriptors for
1980 * @curr_bd: current buffer descriptor
1981 * @buffer: buffer pointer for descriptor
1982 * @buf_token: buffer token (stores skb information)
1983 *
1984 * Prepares the recycled buffer descriptor and adds it to the hardware
1985 * receive queue - if the queue is empty this descriptor becomes the head,
1986 * else adds the descriptor to the end of the queue
1987 *
1988 */
1989static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
1990 struct emac_rx_bd __iomem *curr_bd,
1991 char *buffer, void *buf_token)
1992{
1993 struct emac_rxch *rxch = priv->rxch[ch];
1994
1995 /* populate the hardware descriptor */
1996 curr_bd->h_next = 0;
1997 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
1998 rxch->buf_size, DMA_FROM_DEVICE);
1999 curr_bd->off_b_len = rxch->buf_size;
2000 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
2001 curr_bd->next = NULL;
2002 curr_bd->data_ptr = buffer;
2003 curr_bd->buf_token = buf_token;
2004
2005 /* write back */
2006 BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2007 if (rxch->active_queue_head == NULL) {
2008 rxch->active_queue_head = curr_bd;
2009 rxch->active_queue_tail = curr_bd;
2010 if (0 != rxch->queue_active) {
2011 emac_write(EMAC_RXHDP(ch),
2012 emac_virt_to_phys(rxch->active_queue_head, priv));
2013 rxch->queue_active = 1;
2014 }
2015 } else {
2016 struct emac_rx_bd __iomem *tail_bd;
2017 u32 frame_status;
2018
2019 tail_bd = rxch->active_queue_tail;
2020 rxch->active_queue_tail = curr_bd;
2021 tail_bd->next = curr_bd;
2022 tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
2023 tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
2024 frame_status = tail_bd->mode;
2025 if (frame_status & EMAC_CPPI_EOQ_BIT) {
2026 emac_write(EMAC_RXHDP(ch),
2027 emac_virt_to_phys(curr_bd, priv));
2028 frame_status &= ~(EMAC_CPPI_EOQ_BIT);
2029 tail_bd->mode = frame_status;
2030 ++rxch->end_of_queue_add;
2031 }
2032 }
2033 ++rxch->recycled_bd;
2034}
2035
2036/**
2037 * emac_net_rx_cb: Prepares packet and sends to upper layer
2038 * @priv: The DaVinci EMAC private adapter structure
2039 * @net_pkt_list: Network packet list (received packets)
2040 *
2041 * Invalidates packet buffer memory and sends the received packet to upper
2042 * layer
2043 *
2044 * Returns success or appropriate error code (none as of now)
2045 */
2046static int emac_net_rx_cb(struct emac_priv *priv,
2047 struct emac_netpktobj *net_pkt_list)
2048{
2049 struct net_device *ndev = priv->ndev;
2050 struct sk_buff *p_skb = net_pkt_list->pkt_token;
2051 /* set length of packet */
2052 skb_put(p_skb, net_pkt_list->pkt_length);
2053 p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
2054 netif_receive_skb(p_skb);
2055 ndev->stats.rx_bytes += net_pkt_list->pkt_length;
2056 ndev->stats.rx_packets++;
2057 return 0;
2058}
2059
2060/**
2061 * emac_rx_bdproc: RX buffer descriptor (packet) processing
2062 * @priv: The DaVinci EMAC private adapter structure
2063 * @ch: RX channel number to process buffer descriptors for
2064 * @budget: number of packets allowed to process
2065 * @pending: indication to caller that packets are pending to process
2066 *
2067 * Processes RX buffer descriptors - checks ownership bit on the RX buffer
2068 * descriptor, sends the receive packet to upper layer, allocates a new SKB
2069 * and recycles the buffer descriptor (requeues it in hardware RX queue).
2070 * Only "budget" number of packets are processed and indication of pending
2071 * packets provided to the caller.
2072 *
2073 * Returns number of packets processed (and indication of pending packets)
2074 */
2075static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
2076{
2077 unsigned long flags;
2078 u32 frame_status;
2079 u32 pkts_processed = 0;
2080 char *new_buffer;
2081 struct emac_rx_bd __iomem *curr_bd;
2082 struct emac_rx_bd __iomem *last_bd;
2083 struct emac_netpktobj *curr_pkt, pkt_obj;
2084 struct emac_netbufobj buf_obj;
2085 struct emac_netbufobj *rx_buf_obj;
2086 void *new_buf_token;
2087 struct emac_rxch *rxch = priv->rxch[ch];
2088
2089 if (unlikely(1 == rxch->teardown_pending))
2090 return 0;
2091 ++rxch->proc_count;
2092 spin_lock_irqsave(&priv->rx_lock, flags);
2093 pkt_obj.buf_list = &buf_obj;
2094 curr_pkt = &pkt_obj;
2095 curr_bd = rxch->active_queue_head;
2096 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2097 frame_status = curr_bd->mode;
2098
2099 while ((curr_bd) &&
2100 ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
2101 (pkts_processed < budget)) {
2102
2103 new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
2104 &new_buf_token, EMAC_DEF_RX_CH);
2105 if (unlikely(NULL == new_buffer)) {
2106 ++rxch->out_of_rx_buffers;
2107 goto end_emac_rx_bdproc;
2108 }
2109
2110 /* populate received packet data structure */
2111 rx_buf_obj = &curr_pkt->buf_list[0];
2112 rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
2113 rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
2114 rx_buf_obj->buf_token = curr_bd->buf_token;
2115
2116 dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
2117 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
2118 DMA_FROM_DEVICE);
2119
2120 curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
2121 curr_pkt->num_bufs = 1;
2122 curr_pkt->pkt_length =
2123 (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
2124 emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
2125 ++rxch->processed_bd;
2126 last_bd = curr_bd;
2127 curr_bd = last_bd->next;
2128 rxch->active_queue_head = curr_bd;
2129
2130 /* check if end of RX queue ? */
2131 if (frame_status & EMAC_CPPI_EOQ_BIT) {
2132 if (curr_bd) {
2133 ++rxch->mis_queued_packets;
2134 emac_write(EMAC_RXHDP(ch),
2135 emac_virt_to_phys(curr_bd, priv));
2136 } else {
2137 ++rxch->end_of_queue;
2138 rxch->queue_active = 0;
2139 }
2140 }
2141
2142 /* recycle BD */
2143 emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
2144 new_buf_token);
2145
2146 /* return the packet to the user - BD ptr passed in
2147 * last parameter for potential *future* use */
2148 spin_unlock_irqrestore(&priv->rx_lock, flags);
2149 emac_net_rx_cb(priv, curr_pkt);
2150 spin_lock_irqsave(&priv->rx_lock, flags);
2151 curr_bd = rxch->active_queue_head;
2152 if (curr_bd) {
2153 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2154 frame_status = curr_bd->mode;
2155 }
2156 ++pkts_processed;
2157 }
2158
2159end_emac_rx_bdproc:
2160 spin_unlock_irqrestore(&priv->rx_lock, flags);
2161 return pkts_processed;
2162}
2163
2164/**
2165 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception 1269 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
2166 * @priv: The DaVinci EMAC private adapter structure 1270 * @priv: The DaVinci EMAC private adapter structure
2167 * 1271 *
@@ -2172,7 +1276,7 @@ end_emac_rx_bdproc:
2172 */ 1276 */
2173static int emac_hw_enable(struct emac_priv *priv) 1277static int emac_hw_enable(struct emac_priv *priv)
2174{ 1278{
2175 u32 ch, val, mbp_enable, mac_control; 1279 u32 val, mbp_enable, mac_control;
2176 1280
2177 /* Soft reset */ 1281 /* Soft reset */
2178 emac_write(EMAC_SOFTRESET, 1); 1282 emac_write(EMAC_SOFTRESET, 1);
@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
2215 emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL); 1319 emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
2216 priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF; 1320 priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
2217 1321
2218 val = emac_read(EMAC_TXCONTROL);
2219 val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
2220 emac_write(EMAC_TXCONTROL, val);
2221 val = emac_read(EMAC_RXCONTROL);
2222 val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
2223 emac_write(EMAC_RXCONTROL, val);
2224 emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL); 1322 emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
2225 1323
2226 for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) { 1324 emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
2227 emac_write(EMAC_TXHDP(ch), 0);
2228 emac_write(EMAC_TXINTMASKSET, BIT(ch));
2229 }
2230 for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
2231 struct emac_rxch *rxch = priv->rxch[ch];
2232 emac_setmac(priv, ch, rxch->mac_addr);
2233 emac_write(EMAC_RXINTMASKSET, BIT(ch));
2234 rxch->queue_active = 1;
2235 emac_write(EMAC_RXHDP(ch),
2236 emac_virt_to_phys(rxch->active_queue_head, priv));
2237 }
2238 1325
2239 /* Enable MII */ 1326 /* Enable MII */
2240 val = emac_read(EMAC_MACCONTROL); 1327 val = emac_read(EMAC_MACCONTROL);
@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
2279 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; 1366 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
2280 1367
2281 if (status & mask) { 1368 if (status & mask) {
2282 num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH, 1369 num_tx_pkts = cpdma_chan_process(priv->txchan,
2283 EMAC_DEF_TX_MAX_SERVICE); 1370 EMAC_DEF_TX_MAX_SERVICE);
2284 } /* TX processing */ 1371 } /* TX processing */
2285 1372
2286 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; 1373 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
2289 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; 1376 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
2290 1377
2291 if (status & mask) { 1378 if (status & mask) {
2292 num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget); 1379 num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
2293 } /* RX processing */ 1380 } /* RX processing */
2294 1381
2295 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; 1382 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
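With the private TX/RX descriptor walkers gone, the NAPI poll routine asks CPDMA to process completions on each channel and gets back the number of packets handled; the per-packet work happens in the handlers registered at channel-creation time. A condensed sketch of the reworked poll shape, assuming the names used in this patch (interrupt-vector decoding, napi_complete and interrupt re-enable handling omitted):

static int emac_poll_sketch(struct napi_struct *napi, int budget)
{
        struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
        int num_tx_pkts, num_rx_pkts;

        /* TX completions free skbs via emac_tx_handler() */
        num_tx_pkts = cpdma_chan_process(priv->txchan, EMAC_DEF_TX_MAX_SERVICE);
        /* RX completions feed packets up via emac_rx_handler() */
        num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);

        (void)num_tx_pkts;
        return num_rx_pkts;
}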
@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
2348} 1435}
2349#endif 1436#endif
2350 1437
2351/* PHY/MII bus related */
2352
2353/* Wait until mdio is ready for next command */
2354#define MDIO_WAIT_FOR_USER_ACCESS\
2355 while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
2356 MDIO_USERACCESS_GO) != 0)
2357
2358static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
2359{
2360 unsigned int phy_data = 0;
2361 unsigned int phy_control;
2362
2363 /* Wait until mdio is ready for next command */
2364 MDIO_WAIT_FOR_USER_ACCESS;
2365
2366 phy_control = (MDIO_USERACCESS_GO |
2367 MDIO_USERACCESS_READ |
2368 ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
2369 ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
2370 (phy_data & MDIO_USERACCESS_DATA));
2371 emac_mdio_write(MDIO_USERACCESS(0), phy_control);
2372
2373 /* Wait until mdio is ready for next command */
2374 MDIO_WAIT_FOR_USER_ACCESS;
2375
2376 return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
2377
2378}
2379
2380static int emac_mii_write(struct mii_bus *bus, int phy_id,
2381 int phy_reg, u16 phy_data)
2382{
2383
2384 unsigned int control;
2385
2386 /* Wait until mdio is ready for next command */
2387 MDIO_WAIT_FOR_USER_ACCESS;
2388
2389 control = (MDIO_USERACCESS_GO |
2390 MDIO_USERACCESS_WRITE |
2391 ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
2392 ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
2393 (phy_data & MDIO_USERACCESS_DATA));
2394 emac_mdio_write(MDIO_USERACCESS(0), control);
2395
2396 return 0;
2397}
2398
2399static int emac_mii_reset(struct mii_bus *bus)
2400{
2401 unsigned int clk_div;
2402 int mdio_bus_freq = emac_bus_frequency;
2403
2404 if (mdio_max_freq && mdio_bus_freq)
2405 clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
2406 else
2407 clk_div = 0xFF;
2408
2409 clk_div &= MDIO_CONTROL_CLKDIV;
2410
2411 /* Set enable and clock divider in MDIOControl */
2412 emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
2413
2414 return 0;
2415
2416}
2417
2418static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
2419
2420/* emac_driver: EMAC MII bus structure */
2421
2422static struct mii_bus *emac_mii;
2423
2424static void emac_adjust_link(struct net_device *ndev) 1438static void emac_adjust_link(struct net_device *ndev)
2425{ 1439{
2426 struct emac_priv *priv = netdev_priv(ndev); 1440 struct emac_priv *priv = netdev_priv(ndev);
@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
2485 return -EOPNOTSUPP; 1499 return -EOPNOTSUPP;
2486} 1500}
2487 1501
1502static int match_first_device(struct device *dev, void *data)
1503{
1504 return 1;
1505}
1506
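This trivial match callback always succeeds, so bus_find_device() on the MDIO bus returns the first PHY device registered there (by the separate davinci_mdio driver from this series, or any other MDIO bus driver); its device name is then used as the phy_id string for phy_connect() in emac_dev_open() below. A small sketch of that fallback lookup, assuming at least one PHY is present on the bus:

static const char *emac_default_phy_id(void)
{
        struct device *phy = bus_find_device(&mdio_bus_type, NULL, NULL,
                                             match_first_device);

        /* dev_name() is typically of the form "<mii-bus-id>:<phy-addr>" */
        return phy ? dev_name(phy) : NULL;
}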
2488/** 1507/**
2489 * emac_dev_open: EMAC device open 1508 * emac_dev_open: EMAC device open
2490 * @ndev: The DaVinci EMAC network adapter 1509 * @ndev: The DaVinci EMAC network adapter
@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
2498static int emac_dev_open(struct net_device *ndev) 1517static int emac_dev_open(struct net_device *ndev)
2499{ 1518{
2500 struct device *emac_dev = &ndev->dev; 1519 struct device *emac_dev = &ndev->dev;
2501 u32 rc, cnt, ch; 1520 u32 cnt;
2502 int phy_addr;
2503 struct resource *res; 1521 struct resource *res;
2504 int q, m; 1522 int q, m, ret;
2505 int i = 0; 1523 int i = 0;
2506 int k = 0; 1524 int k = 0;
2507 struct emac_priv *priv = netdev_priv(ndev); 1525 struct emac_priv *priv = netdev_priv(ndev);
@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
2513 /* Configuration items */ 1531 /* Configuration items */
2514 priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; 1532 priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
2515 1533
2516 /* Clear basic hardware */
2517 for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
2518 emac_write(EMAC_TXHDP(ch), 0);
2519 emac_write(EMAC_RXHDP(ch), 0);
2520 emac_write(EMAC_RXHDP(ch), 0);
2521 emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
2522 emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
2523 }
2524 priv->mac_hash1 = 0; 1534 priv->mac_hash1 = 0;
2525 priv->mac_hash2 = 0; 1535 priv->mac_hash2 = 0;
2526 emac_write(EMAC_MACHASH1, 0); 1536 emac_write(EMAC_MACHASH1, 0);
2527 emac_write(EMAC_MACHASH2, 0); 1537 emac_write(EMAC_MACHASH2, 0);
2528 1538
2529 /* multi ch not supported - open 1 TX, 1RX ch by default */ 1539 for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
2530 rc = emac_init_txch(priv, EMAC_DEF_TX_CH); 1540 struct sk_buff *skb = emac_rx_alloc(priv);
2531 if (0 != rc) { 1541
2532 dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed"); 1542 if (!skb)
2533 return rc; 1543 break;
2534 } 1544
2535 rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr); 1545 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
2536 if (0 != rc) { 1546 skb_tailroom(skb), GFP_KERNEL);
2537 dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed"); 1547 if (WARN_ON(ret < 0))
2538 return rc; 1548 break;
2539 } 1549 }
2540 1550
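emac_dev_open() now pre-posts EMAC_DEF_RX_NUM_DESC receive buffers to the RX channel before the DMA controller is started; each buffer is a fresh skb whose tailroom is handed to CPDMA, with the skb itself used as the completion token. emac_rx_handler() later replenishes the ring one buffer at a time, so a helper shaped roughly like the following covers both call sites (a sketch, using the names from this patch):

static int emac_post_rx_buf(struct emac_priv *priv)
{
        struct sk_buff *skb = emac_rx_alloc(priv);

        if (!skb)
                return -ENOMEM;

        /* the skb is the token; the actual packet length arrives in the
         * rx handler's 'len' argument once the buffer completes */
        return cpdma_chan_submit(priv->rxchan, skb, skb->data,
                                 skb_tailroom(skb), GFP_KERNEL);
}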
2541 /* Request IRQ */ 1551 /* Request IRQ */
@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
2560 emac_set_coalesce(ndev, &coal); 1570 emac_set_coalesce(ndev, &coal);
2561 } 1571 }
2562 1572
2563 /* find the first phy */ 1573 cpdma_ctlr_start(priv->dma);
1574
2564 priv->phydev = NULL; 1575 priv->phydev = NULL;
2565 if (priv->phy_mask) { 1576 /* use the first phy on the bus if pdata did not give us a phy id */
2566 emac_mii_reset(priv->mii_bus); 1577 if (!priv->phy_id) {
2567 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 1578 struct device *phy;
2568 if (priv->mii_bus->phy_map[phy_addr]) {
2569 priv->phydev = priv->mii_bus->phy_map[phy_addr];
2570 break;
2571 }
2572 }
2573 1579
2574 if (!priv->phydev) { 1580 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
2575 printk(KERN_ERR "%s: no PHY found\n", ndev->name); 1581 match_first_device);
2576 return -1; 1582 if (phy)
2577 } 1583 priv->phy_id = dev_name(phy);
1584 }
2578 1585
2579 priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev), 1586 if (priv->phy_id && *priv->phy_id) {
2580 &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1587 priv->phydev = phy_connect(ndev, priv->phy_id,
1588 &emac_adjust_link, 0,
1589 PHY_INTERFACE_MODE_MII);
2581 1590
2582 if (IS_ERR(priv->phydev)) { 1591 if (IS_ERR(priv->phydev)) {
2583 printk(KERN_ERR "%s: Could not attach to PHY\n", 1592 dev_err(emac_dev, "could not connect to phy %s\n",
2584 ndev->name); 1593 priv->phy_id);
1594 priv->phydev = NULL;
2585 return PTR_ERR(priv->phydev); 1595 return PTR_ERR(priv->phydev);
2586 } 1596 }
2587 1597
@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
2589 priv->speed = 0; 1599 priv->speed = 0;
2590 priv->duplex = ~0; 1600 priv->duplex = ~0;
2591 1601
2592 printk(KERN_INFO "%s: attached PHY driver [%s] " 1602 dev_info(emac_dev, "attached PHY driver [%s] "
2593 "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name, 1603 "(mii_bus:phy_addr=%s, id=%x)\n",
2594 priv->phydev->drv->name, dev_name(&priv->phydev->dev), 1604 priv->phydev->drv->name, dev_name(&priv->phydev->dev),
2595 priv->phydev->phy_id); 1605 priv->phydev->phy_id);
2596 } else{ 1606 } else {
2597 /* No PHY , fix the link, speed and duplex settings */ 1607 /* No PHY , fix the link, speed and duplex settings */
1608 dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
2598 priv->link = 1; 1609 priv->link = 1;
2599 priv->speed = SPEED_100; 1610 priv->speed = SPEED_100;
2600 priv->duplex = DUPLEX_FULL; 1611 priv->duplex = DUPLEX_FULL;
@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
2607 if (netif_msg_drv(priv)) 1618 if (netif_msg_drv(priv))
2608 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); 1619 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
2609 1620
2610 if (priv->phy_mask) 1621 if (priv->phydev)
2611 phy_start(priv->phydev); 1622 phy_start(priv->phydev);
2612 1623
2613 return 0; 1624 return 0;
@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
2648 1659
2649 netif_carrier_off(ndev); 1660 netif_carrier_off(ndev);
2650 emac_int_disable(priv); 1661 emac_int_disable(priv);
2651 emac_stop_txch(priv, EMAC_DEF_TX_CH); 1662 cpdma_ctlr_stop(priv->dma);
2652 emac_stop_rxch(priv, EMAC_DEF_RX_CH);
2653 emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
2654 emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
2655 emac_write(EMAC_SOFTRESET, 1); 1663 emac_write(EMAC_SOFTRESET, 1);
2656 1664
2657 if (priv->phydev) 1665 if (priv->phydev)
@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2756 struct resource *res; 1764 struct resource *res;
2757 struct net_device *ndev; 1765 struct net_device *ndev;
2758 struct emac_priv *priv; 1766 struct emac_priv *priv;
2759 unsigned long size; 1767 unsigned long size, hw_ram_addr;
2760 struct emac_platform_data *pdata; 1768 struct emac_platform_data *pdata;
2761 struct device *emac_dev; 1769 struct device *emac_dev;
1770 struct cpdma_params dma_params;
2762 1771
2763 /* obtain emac clock from kernel */ 1772 /* obtain emac clock from kernel */
2764 emac_clk = clk_get(&pdev->dev, NULL); 1773 emac_clk = clk_get(&pdev->dev, NULL);
@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2782 priv->ndev = ndev; 1791 priv->ndev = ndev;
2783 priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); 1792 priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
2784 1793
2785 spin_lock_init(&priv->tx_lock);
2786 spin_lock_init(&priv->rx_lock);
2787 spin_lock_init(&priv->lock); 1794 spin_lock_init(&priv->lock);
2788 1795
2789 pdata = pdev->dev.platform_data; 1796 pdata = pdev->dev.platform_data;
@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2794 1801
2795 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1802 /* MAC addr and PHY mask , RMII enable info from platform_data */
2796 memcpy(priv->mac_addr, pdata->mac_addr, 6); 1803 memcpy(priv->mac_addr, pdata->mac_addr, 6);
2797 priv->phy_mask = pdata->phy_mask; 1804 priv->phy_id = pdata->phy_id;
2798 priv->rmii_en = pdata->rmii_en; 1805 priv->rmii_en = pdata->rmii_en;
2799 priv->version = pdata->version; 1806 priv->version = pdata->version;
2800 priv->int_enable = pdata->interrupt_enable; 1807 priv->int_enable = pdata->interrupt_enable;
@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2831 ndev->base_addr = (unsigned long)priv->remap_addr; 1838 ndev->base_addr = (unsigned long)priv->remap_addr;
2832 1839
2833 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; 1840 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
2834 priv->ctrl_ram_size = pdata->ctrl_ram_size;
2835 priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
2836 1841
2837 if (pdata->hw_ram_addr) 1842 hw_ram_addr = pdata->hw_ram_addr;
2838 priv->hw_ram_addr = pdata->hw_ram_addr; 1843 if (!hw_ram_addr)
2839 else 1844 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
2840 priv->hw_ram_addr = (u32 __force)res->start + 1845
2841 pdata->ctrl_ram_offset; 1846 memset(&dma_params, 0, sizeof(dma_params));
1847 dma_params.dev = emac_dev;
1848 dma_params.dmaregs = priv->emac_base;
1849 dma_params.rxthresh = priv->emac_base + 0x120;
1850 dma_params.rxfree = priv->emac_base + 0x140;
1851 dma_params.txhdp = priv->emac_base + 0x600;
1852 dma_params.rxhdp = priv->emac_base + 0x620;
1853 dma_params.txcp = priv->emac_base + 0x640;
1854 dma_params.rxcp = priv->emac_base + 0x660;
1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
1857 dma_params.desc_mem_phys = hw_ram_addr;
1858 dma_params.desc_mem_size = pdata->ctrl_ram_size;
1859 dma_params.desc_align = 16;
1860
1861 priv->dma = cpdma_ctlr_create(&dma_params);
1862 if (!priv->dma) {
1863 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
1864 rc = -ENOMEM;
1865 goto no_dma;
1866 }
1867
1868 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
1869 emac_tx_handler);
1870 priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
1871 emac_rx_handler);
1872 if (WARN_ON(!priv->txchan || !priv->rxchan)) {
1873 rc = -ENOMEM;
1874 goto no_irq_res;
1875 }
2842 1876
2843 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1877 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2844 if (!res) { 1878 if (!res) {
@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2871 } 1905 }
2872 1906
2873 1907
2874 /* MII/Phy intialisation, mdio bus registration */
2875 emac_mii = mdiobus_alloc();
2876 if (emac_mii == NULL) {
2877 dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
2878 rc = -ENOMEM;
2879 goto mdio_alloc_err;
2880 }
2881
2882 priv->mii_bus = emac_mii;
2883 emac_mii->name = "emac-mii",
2884 emac_mii->read = emac_mii_read,
2885 emac_mii->write = emac_mii_write,
2886 emac_mii->reset = emac_mii_reset,
2887 emac_mii->irq = mii_irqs,
2888 emac_mii->phy_mask = ~(priv->phy_mask);
2889 emac_mii->parent = &pdev->dev;
2890 emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
2891 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
2892 mdio_max_freq = pdata->mdio_max_freq;
2893 emac_mii->reset(emac_mii);
2894
2895 /* Register the MII bus */
2896 rc = mdiobus_register(emac_mii);
2897 if (rc)
2898 goto mdiobus_quit;
2899
2900 if (netif_msg_probe(priv)) { 1908 if (netif_msg_probe(priv)) {
2901 dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ 1909 dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
2902 "(regs: %p, irq: %d)\n", 1910 "(regs: %p, irq: %d)\n",
@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2904 } 1912 }
2905 return 0; 1913 return 0;
2906 1914
2907mdiobus_quit:
2908 mdiobus_free(emac_mii);
2909
2910netdev_reg_err: 1915netdev_reg_err:
2911mdio_alloc_err:
2912 clk_disable(emac_clk); 1916 clk_disable(emac_clk);
2913no_irq_res: 1917no_irq_res:
1918 if (priv->txchan)
1919 cpdma_chan_destroy(priv->txchan);
1920 if (priv->rxchan)
1921 cpdma_chan_destroy(priv->rxchan);
1922 cpdma_ctlr_destroy(priv->dma);
1923no_dma:
2914 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1924 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2915 release_mem_region(res->start, res->end - res->start + 1); 1925 release_mem_region(res->start, res->end - res->start + 1);
2916 iounmap(priv->remap_addr); 1926 iounmap(priv->remap_addr);
@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2938 1948
2939 platform_set_drvdata(pdev, NULL); 1949 platform_set_drvdata(pdev, NULL);
2940 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1950 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2941 mdiobus_unregister(priv->mii_bus); 1951
2942 mdiobus_free(priv->mii_bus); 1952 if (priv->txchan)
1953 cpdma_chan_destroy(priv->txchan);
1954 if (priv->rxchan)
1955 cpdma_chan_destroy(priv->rxchan);
1956 cpdma_ctlr_destroy(priv->dma);
2943 1957
2944 release_mem_region(res->start, res->end - res->start + 1); 1958 release_mem_region(res->start, res->end - res->start + 1);
2945 1959
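
With the MDIO and buffer-management code split out, the EMAC probe/remove paths above reduce to creating a CPDMA controller from a filled-in struct cpdma_params, creating one TX and one RX channel, and tearing both down before the controller. The sketch below condenses that lifecycle using only the helpers visible in the hunks above (cpdma_ctlr_create(), cpdma_chan_create(), cpdma_chan_destroy(), cpdma_ctlr_destroy()); it assumes the driver's existing types and channel handlers and is illustrative rather than a drop-in replacement.

/* Condensed create/teardown ordering, mirroring the probe and remove paths. */
static int emac_cpdma_setup(struct emac_priv *priv, struct cpdma_params *params)
{
	priv->dma = cpdma_ctlr_create(params);
	if (!priv->dma)
		return -ENOMEM;

	priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
					 emac_tx_handler);
	priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
					 emac_rx_handler);
	if (priv->txchan && priv->rxchan)
		return 0;

	/* unwind: channels first, then the controller */
	if (priv->txchan)
		cpdma_chan_destroy(priv->txchan);
	if (priv->rxchan)
		cpdma_chan_destroy(priv->rxchan);
	cpdma_ctlr_destroy(priv->dma);
	return -ENOMEM;
}
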
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/davinci_mdio.c
@@ -0,0 +1,475 @@
1/*
2 * DaVinci MDIO Module driver
3 *
4 * Copyright (C) 2010 Texas Instruments.
5 *
6 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
7 *
8 * Copyright (C) 2009 Texas Instruments.
9 *
10 * ---------------------------------------------------------------------------
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ---------------------------------------------------------------------------
26 */
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/platform_device.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/phy.h>
34#include <linux/clk.h>
35#include <linux/err.h>
36#include <linux/io.h>
37#include <linux/davinci_emac.h>
38
39/*
40 * This timeout definition is a worst-case ultra defensive measure against
41 * unexpected controller lock ups. Ideally, we should never ever hit this
42 * scenario in practice.
43 */
44#define MDIO_TIMEOUT 100 /* msecs */
45
46#define PHY_REG_MASK 0x1f
47#define PHY_ID_MASK 0x1f
48
49#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
50
51struct davinci_mdio_regs {
52 u32 version;
53 u32 control;
54#define CONTROL_IDLE BIT(31)
55#define CONTROL_ENABLE BIT(30)
56#define CONTROL_MAX_DIV (0xff)
57
58 u32 alive;
59 u32 link;
60 u32 linkintraw;
61 u32 linkintmasked;
62 u32 __reserved_0[2];
63 u32 userintraw;
64 u32 userintmasked;
65 u32 userintmaskset;
66 u32 userintmaskclr;
67 u32 __reserved_1[20];
68
69 struct {
70 u32 access;
71#define USERACCESS_GO BIT(31)
72#define USERACCESS_WRITE BIT(30)
73#define USERACCESS_ACK BIT(29)
74#define USERACCESS_READ (0)
75#define USERACCESS_DATA (0xffff)
76
77 u32 physel;
78 } user[0];
79};
80
81static struct mdio_platform_data default_pdata = {
82 .bus_freq = DEF_OUT_FREQ,
83};
84
85struct davinci_mdio_data {
86 struct mdio_platform_data pdata;
87 struct davinci_mdio_regs __iomem *regs;
88 spinlock_t lock;
89 struct clk *clk;
90 struct device *dev;
91 struct mii_bus *bus;
92 bool suspended;
93 unsigned long access_time; /* jiffies */
94};
95
96static void __davinci_mdio_reset(struct davinci_mdio_data *data)
97{
98 u32 mdio_in, div, mdio_out_khz, access_time;
99
100 mdio_in = clk_get_rate(data->clk);
101 div = (mdio_in / data->pdata.bus_freq) - 1;
102 if (div > CONTROL_MAX_DIV)
103 div = CONTROL_MAX_DIV;
104
105 /* set enable and clock divider */
106 __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
107
108 /*
109 * One mdio transaction consists of:
110 * 32 bits of preamble
111 * 32 bits of transferred data
112 * 24 bits of bus yield (not needed unless shared?)
113 */
114 mdio_out_khz = mdio_in / (1000 * (div + 1));
115 access_time = (88 * 1000) / mdio_out_khz;
116
117 /*
118 * In the worst case, we could be kicking off a user-access immediately
119 * after the mdio bus scan state-machine triggered its own read. If
120 * so, our request could get deferred by one access cycle. We
121 * defensively allow for 4 access cycles.
122 */
123 data->access_time = usecs_to_jiffies(access_time * 4);
124 if (!data->access_time)
125 data->access_time = 1;
126}
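/*
 * Worked example for the divider/access-time math above (the input clock
 * rate is hypothetical; only the formulas come from __davinci_mdio_reset):
 *
 *   mdio_in      = 150000000 Hz (assumed functional clock)
 *   bus_freq     = 2200000 Hz   (the DEF_OUT_FREQ default)
 *   div          = 150000000 / 2200000 - 1        = 67   (<= CONTROL_MAX_DIV)
 *   mdio_out_khz = 150000000 / (1000 * (67 + 1))  = 2205
 *   access_time  = (88 * 1000) / 2205             = 39 usecs per transaction
 *
 * data->access_time then becomes usecs_to_jiffies(4 * 39), clamped to a
 * minimum of one jiffy by the check above.
 */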
127
128static int davinci_mdio_reset(struct mii_bus *bus)
129{
130 struct davinci_mdio_data *data = bus->priv;
131 u32 phy_mask, ver;
132
133 __davinci_mdio_reset(data);
134
135 /* wait for scan logic to settle */
136 msleep(PHY_MAX_ADDR * data->access_time);
137
138 /* dump hardware version info */
139 ver = __raw_readl(&data->regs->version);
140 dev_info(data->dev, "davinci mdio revision %d.%d\n",
141 (ver >> 8) & 0xff, ver & 0xff);
142
143 /* get phy mask from the alive register */
144 phy_mask = __raw_readl(&data->regs->alive);
145 if (phy_mask) {
146 /* restrict mdio bus to live phys only */
147 dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
148 phy_mask = ~phy_mask;
149 } else {
150 /* desperately scan all phys */
151 dev_warn(data->dev, "no live phy, scanning all\n");
152 phy_mask = 0;
153 }
154 data->bus->phy_mask = phy_mask;
155
156 return 0;
157}
158
159/* wait until hardware is ready for another user access */
160static inline int wait_for_user_access(struct davinci_mdio_data *data)
161{
162 struct davinci_mdio_regs __iomem *regs = data->regs;
163 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
164 u32 reg;
165
166 while (time_after(timeout, jiffies)) {
167 reg = __raw_readl(&regs->user[0].access);
168 if ((reg & USERACCESS_GO) == 0)
169 return 0;
170
171 reg = __raw_readl(&regs->control);
172 if ((reg & CONTROL_IDLE) == 0)
173 continue;
174
175 /*
176 * An emac soft_reset may have clobbered the mdio controller's
177 * state machine. We need to reset and retry the current
178 * operation
179 */
180 dev_warn(data->dev, "resetting idled controller\n");
181 __davinci_mdio_reset(data);
182 return -EAGAIN;
183 }
184 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT;
186}
187
188/* wait until hardware state machine is idle */
189static inline int wait_for_idle(struct davinci_mdio_data *data)
190{
191 struct davinci_mdio_regs __iomem *regs = data->regs;
192 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
193
194 while (time_after(timeout, jiffies)) {
195 if (__raw_readl(&regs->control) & CONTROL_IDLE)
196 return 0;
197 }
198 dev_err(data->dev, "timed out waiting for idle\n");
199 return -ETIMEDOUT;
200}
201
202static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
203{
204 struct davinci_mdio_data *data = bus->priv;
205 u32 reg;
206 int ret;
207
208 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
209 return -EINVAL;
210
211 spin_lock(&data->lock);
212
213 if (data->suspended) {
214 spin_unlock(&data->lock);
215 return -ENODEV;
216 }
217
218 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
219 (phy_id << 16));
220
221 while (1) {
222 ret = wait_for_user_access(data);
223 if (ret == -EAGAIN)
224 continue;
225 if (ret < 0)
226 break;
227
228 __raw_writel(reg, &data->regs->user[0].access);
229
230 ret = wait_for_user_access(data);
231 if (ret == -EAGAIN)
232 continue;
233 if (ret < 0)
234 break;
235
236 reg = __raw_readl(&data->regs->user[0].access);
237 ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
238 break;
239 }
240
241 spin_unlock(&data->lock);
242
243 return ret;
244}
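/*
 * Illustration of the USERACCESS encoding used above (the values are a
 * worked example, not from the driver): reading MII register 1 (BMSR) of
 * the PHY at address 3 issues
 *
 *   USERACCESS_GO | USERACCESS_READ | (1 << 21) | (3 << 16) == 0x80230000
 *
 * and the completed access is reported with USERACCESS_ACK set and the
 * 16-bit result in the USERACCESS_DATA field.
 */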
245
246static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
247 int phy_reg, u16 phy_data)
248{
249 struct davinci_mdio_data *data = bus->priv;
250 u32 reg;
251 int ret;
252
253 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
254 return -EINVAL;
255
256 spin_lock(&data->lock);
257
258 if (data->suspended) {
259 spin_unlock(&data->lock);
260 return -ENODEV;
261 }
262
263 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
264 (phy_id << 16) | (phy_data & USERACCESS_DATA));
265
266 while (1) {
267 ret = wait_for_user_access(data);
268 if (ret == -EAGAIN)
269 continue;
270 if (ret < 0)
271 break;
272
273 __raw_writel(reg, &data->regs->user[0].access);
274
275 ret = wait_for_user_access(data);
276 if (ret == -EAGAIN)
277 continue;
278 break;
279 }
280
281 spin_unlock(&data->lock);
282
283 return ret;
284}
285
286static int __devinit davinci_mdio_probe(struct platform_device *pdev)
287{
288 struct mdio_platform_data *pdata = pdev->dev.platform_data;
289 struct device *dev = &pdev->dev;
290 struct davinci_mdio_data *data;
291 struct resource *res;
292 struct phy_device *phy;
293 int ret, addr;
294
295 data = kzalloc(sizeof(*data), GFP_KERNEL);
296 if (!data) {
297 dev_err(dev, "failed to alloc device data\n");
298 return -ENOMEM;
299 }
300
301 data->pdata = pdata ? (*pdata) : default_pdata;
302
303 data->bus = mdiobus_alloc();
304 if (!data->bus) {
305 dev_err(dev, "failed to alloc mii bus\n");
306 ret = -ENOMEM;
307 goto bail_out;
308 }
309
310 data->bus->name = dev_name(dev);
311 data->bus->read = davinci_mdio_read;
312 data->bus->write = davinci_mdio_write;
313 data->bus->reset = davinci_mdio_reset;
314 data->bus->parent = dev;
315 data->bus->priv = data;
316 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
317
318 data->clk = clk_get(dev, NULL);
319 if (IS_ERR(data->clk)) {
320 ret = PTR_ERR(data->clk);
321 data->clk = NULL;
322 dev_err(dev, "failed to get device clock\n");
323 goto bail_out;
324 }
325
326 clk_enable(data->clk);
327
328 dev_set_drvdata(dev, data);
329 data->dev = dev;
330 spin_lock_init(&data->lock);
331
332 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
333 if (!res) {
334 dev_err(dev, "could not find register map resource\n");
335 ret = -ENOENT;
336 goto bail_out;
337 }
338
339 res = devm_request_mem_region(dev, res->start, resource_size(res),
340 dev_name(dev));
341 if (!res) {
342 dev_err(dev, "could not allocate register map resource\n");
343 ret = -ENXIO;
344 goto bail_out;
345 }
346
347 data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
348 if (!data->regs) {
349 dev_err(dev, "could not map mdio registers\n");
350 ret = -ENOMEM;
351 goto bail_out;
352 }
353
354 /* register the mii bus */
355 ret = mdiobus_register(data->bus);
356 if (ret)
357 goto bail_out;
358
359 /* scan and dump the bus */
360 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
361 phy = data->bus->phy_map[addr];
362 if (phy) {
363 dev_info(dev, "phy[%d]: device %s, driver %s\n",
364 phy->addr, dev_name(&phy->dev),
365 phy->drv ? phy->drv->name : "unknown");
366 }
367 }
368
369 return 0;
370
371bail_out:
372 if (data->bus)
373 mdiobus_free(data->bus);
374
375 if (data->clk) {
376 clk_disable(data->clk);
377 clk_put(data->clk);
378 }
379
380 kfree(data);
381
382 return ret;
383}
384
385static int __devexit davinci_mdio_remove(struct platform_device *pdev)
386{
387 struct device *dev = &pdev->dev;
388 struct davinci_mdio_data *data = dev_get_drvdata(dev);
389
390 if (data->bus)
391 mdiobus_free(data->bus);
392
393 if (data->clk) {
394 clk_disable(data->clk);
395 clk_put(data->clk);
396 }
397
398 dev_set_drvdata(dev, NULL);
399
400 kfree(data);
401
402 return 0;
403}
404
405static int davinci_mdio_suspend(struct device *dev)
406{
407 struct davinci_mdio_data *data = dev_get_drvdata(dev);
408 u32 ctrl;
409
410 spin_lock(&data->lock);
411
412 /* shutdown the scan state machine */
413 ctrl = __raw_readl(&data->regs->control);
414 ctrl &= ~CONTROL_ENABLE;
415 __raw_writel(ctrl, &data->regs->control);
416 wait_for_idle(data);
417
418 if (data->clk)
419 clk_disable(data->clk);
420
421 data->suspended = true;
422 spin_unlock(&data->lock);
423
424 return 0;
425}
426
427static int davinci_mdio_resume(struct device *dev)
428{
429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
430 u32 ctrl;
431
432 spin_lock(&data->lock);
433 if (data->clk)
434 clk_enable(data->clk);
435
436 /* restart the scan state machine */
437 ctrl = __raw_readl(&data->regs->control);
438 ctrl |= CONTROL_ENABLE;
439 __raw_writel(ctrl, &data->regs->control);
440
441 data->suspended = false;
442 spin_unlock(&data->lock);
443
444 return 0;
445}
446
447static const struct dev_pm_ops davinci_mdio_pm_ops = {
448 .suspend = davinci_mdio_suspend,
449 .resume = davinci_mdio_resume,
450};
451
452static struct platform_driver davinci_mdio_driver = {
453 .driver = {
454 .name = "davinci_mdio",
455 .owner = THIS_MODULE,
456 .pm = &davinci_mdio_pm_ops,
457 },
458 .probe = davinci_mdio_probe,
459 .remove = __devexit_p(davinci_mdio_remove),
460};
461
462static int __init davinci_mdio_init(void)
463{
464 return platform_driver_register(&davinci_mdio_driver);
465}
466device_initcall(davinci_mdio_init);
467
468static void __exit davinci_mdio_exit(void)
469{
470 platform_driver_unregister(&davinci_mdio_driver);
471}
472module_exit(davinci_mdio_exit);
473
474MODULE_LICENSE("GPL");
475MODULE_DESCRIPTION("DaVinci MDIO driver");
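
A board that previously relied on the EMAC driver's built-in MII code would now register a separate "davinci_mdio" platform device. The sketch below is a hypothetical board-file fragment: the register base address, device id and helper names are invented, and only the driver name and the bus_freq field of struct mdio_platform_data come from the file above; the board must also provide a matching clock lookup, since the driver calls clk_get(dev, NULL).

/* Hypothetical board support fragment -- addresses are illustrative only. */
static struct resource board_mdio_resources[] = {
	{
		.start	= 0x01e24000,			/* made-up MDIO base */
		.end	= 0x01e24000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct mdio_platform_data board_mdio_pdata = {
	.bus_freq	= 2200000,			/* 2.2 MHz MDIO clock */
};

static struct platform_device board_mdio_device = {
	.name		= "davinci_mdio",
	.id		= 0,
	.resource	= board_mdio_resources,
	.num_resources	= ARRAY_SIZE(board_mdio_resources),
	.dev		= {
		.platform_data	= &board_mdio_pdata,
	},
};

/* registered from board init code via platform_device_register(&board_mdio_device) */
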
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 143906417048..f6e0d40cd876 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
124 return 0; 124 return 0;
125} 125}
126 126
127static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
128{
129 struct mlx4_en_dev *endev = ctx;
130
131 return endev->pndev[port];
132}
133
127static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, 134static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
128 enum mlx4_dev_event event, int port) 135 enum mlx4_dev_event event, int port)
129{ 136{
@@ -282,9 +289,11 @@ err_free_res:
282} 289}
283 290
284static struct mlx4_interface mlx4_en_interface = { 291static struct mlx4_interface mlx4_en_interface = {
285 .add = mlx4_en_add, 292 .add = mlx4_en_add,
286 .remove = mlx4_en_remove, 293 .remove = mlx4_en_remove,
287 .event = mlx4_en_event, 294 .event = mlx4_en_event,
295 .get_dev = mlx4_en_get_netdev,
296 .protocol = MLX4_PROTOCOL_EN,
288}; 297};
289 298
290static int __init mlx4_en_init(void) 299static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 79478bd4211a..6d6806b361e3 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
69 struct mlx4_en_priv *priv = netdev_priv(dev); 69 struct mlx4_en_priv *priv = netdev_priv(dev);
70 struct mlx4_en_dev *mdev = priv->mdev; 70 struct mlx4_en_dev *mdev = priv->mdev;
71 int err; 71 int err;
72 int idx;
72 73
73 if (!priv->vlgrp) 74 if (!priv->vlgrp)
74 return; 75 return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
83 if (err) 84 if (err)
84 en_err(priv, "Failed configuring VLAN filter\n"); 85 en_err(priv, "Failed configuring VLAN filter\n");
85 } 86 }
87 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
88 en_err(priv, "failed adding vlan %d\n", vid);
86 mutex_unlock(&mdev->state_lock); 89 mutex_unlock(&mdev->state_lock);
90
87} 91}
88 92
89static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 93static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
91 struct mlx4_en_priv *priv = netdev_priv(dev); 95 struct mlx4_en_priv *priv = netdev_priv(dev);
92 struct mlx4_en_dev *mdev = priv->mdev; 96 struct mlx4_en_dev *mdev = priv->mdev;
93 int err; 97 int err;
98 int idx;
94 99
95 if (!priv->vlgrp) 100 if (!priv->vlgrp)
96 return; 101 return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
101 106
102 /* Remove VID from port VLAN filter */ 107 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock); 108 mutex_lock(&mdev->state_lock);
109 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
110 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
111 else
112 en_err(priv, "could not find vid %d in cache\n", vid);
113
104 if (mdev->device_up && priv->port_up) { 114 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 115 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err) 116 if (err)
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index aa3ef2aee5bf..7f5a3221e0c1 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
127 memset(context, 0, sizeof *context); 127 memset(context, 0, sizeof *context);
128 128
129 context->base_qpn = cpu_to_be32(base_qpn); 129 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); 130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); 131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
132 context->intra_no_vlan = 0; 132 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX; 133 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0; 134 context->intra_vlan_miss = 0;
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index f6511aa2b7df..092e814b1981 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,7 +36,8 @@
36 36
37 37
38#define SET_PORT_GEN_ALL_VALID 0x7 38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_EN_SHIFT 31
40#define SET_PORT_PROMISC_MODE_SHIFT 30
40 41
41enum { 42enum {
42 MLX4_CMD_SET_VLAN_FLTR = 0x47, 43 MLX4_CMD_SET_VLAN_FLTR = 0x47,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b716e1a1b298..b68eee2414c2 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
98 [20] = "Address vector port checking support", 98 [20] = "Address vector port checking support",
99 [21] = "UD multicast support", 99 [21] = "UD multicast support",
100 [24] = "Demand paging support", 100 [24] = "Demand paging support",
101 [25] = "Router support" 101 [25] = "Router support",
102 [30] = "IBoE support"
102 }; 103 };
103 int i; 104 int i;
104 105
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 555067802751..73c94fcdfddf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
161 161
162 mutex_unlock(&intf_mutex); 162 mutex_unlock(&intf_mutex);
163} 163}
164
165void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_device_context *dev_ctx;
169 unsigned long flags;
170 void *result = NULL;
171
172 spin_lock_irqsave(&priv->ctx_lock, flags);
173
174 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
175 if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
176 result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
177 break;
178 }
179
180 spin_unlock_irqrestore(&priv->ctx_lock, flags);
181
182 return result;
183}
184EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
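
Together with the new .protocol and .get_dev members of struct mlx4_interface, this export lets one mlx4 protocol driver look up the per-port device object that another protocol driver registered. A minimal sketch of a caller, assuming it already holds the struct mlx4_dev and a valid port number; the function name is invented for illustration:

/* Sketch: resolve the net_device that the MLX4_PROTOCOL_EN interface
 * (mlx4_en) registered for this port; returns NULL if none. */
static struct net_device *example_get_en_netdev(struct mlx4_dev *dev, int port)
{
	return mlx4_get_protocol_dev(dev, MLX4_PROTOCOL_EN, port);
}
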
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 569fa3df381f..782f11d8fa71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
103 103
104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); 106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
107 107
108int mlx4_check_port_params(struct mlx4_dev *dev, 108int mlx4_check_port_params(struct mlx4_dev *dev,
109 enum mlx4_port_type *port_type) 109 enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
1310 return -1; 1310 return -1;
1311 } 1311 }
1312 1312
1313 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { 1313 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1314 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1314 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1315 return -1; 1315 return -1;
1316 } 1316 }
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 1fc16ab7ad2f..dfed6a07c2d7 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
475 char *mc_addrs; 475 char *mc_addrs;
476 int mc_addrs_cnt; 476 int mc_addrs_cnt;
477 struct mlx4_en_stat_out_mbox hw_stats; 477 struct mlx4_en_stat_out_mbox hw_stats;
478 int vids[128];
478}; 479};
479 480
480 481
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 8674ad5764c4..451339559bdc 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -188,6 +188,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
188 return err; 188 return err;
189} 189}
190 190
191int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
192{
193 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
194 int i;
195
196 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
197 if (table->refs[i] &&
198 (vid == (MLX4_VLAN_MASK &
199 be32_to_cpu(table->entries[i])))) {
200 /* VLAN already registered, increase reference count */
201 *idx = i;
202 return 0;
203 }
204 }
205
206 return -ENOENT;
207}
208EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
209
191int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 210int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
192{ 211{
193 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 212 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index a8e5856ce882..64bfdae5956f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2075 } else { 2075 } else {
2076 /* Try reading mac address from device. if EEPROM is present 2076 /* Try reading mac address from device. if EEPROM is present
2077 * it will already have been set */ 2077 * it will already have been set */
2078 smsc911x_read_mac_address(dev); 2078 smsc_get_mac(dev);
2079 2079
2080 if (is_valid_ether_addr(dev->dev_addr)) { 2080 if (is_valid_ether_addr(dev->dev_addr)) {
2081 /* eeprom values are valid so use them */ 2081 /* eeprom values are valid so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
2176/* Entry point for loading the module */ 2176/* Entry point for loading the module */
2177static int __init smsc911x_init_module(void) 2177static int __init smsc911x_init_module(void)
2178{ 2178{
2179 SMSC_INITIALIZE();
2179 return platform_driver_register(&smsc911x_driver); 2180 return platform_driver_register(&smsc911x_driver);
2180} 2181}
2181 2182
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c65ce2..52f38e12a879 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -394,4 +394,15 @@
394#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \ 394#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
395 LPA_PAUSE_ASYM) 395 LPA_PAUSE_ASYM)
396 396
397/*
398 * Provide hooks to let the arch add to the initialisation procedure
399 * and to override the source of the MAC address.
400 */
401#define SMSC_INITIALIZE() do {} while (0)
402#define smsc_get_mac(dev) smsc911x_read_mac_address((dev))
403
404#ifdef CONFIG_SMSC911X_ARCH_HOOKS
405#include <asm/smsc911x.h>
406#endif
407
397#endif /* __SMSC911X_H__ */ 408#endif /* __SMSC911X_H__ */
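
SMSC_INITIALIZE() defaults to a no-op and smsc_get_mac() to reading the address back from the chip, so an architecture that enables CONFIG_SMSC911X_ARCH_HOOKS can override both from <asm/smsc911x.h>. A hypothetical override might look like the fragment below; the board helper names are invented, and because the defaults above are defined before the include, the arch header has to #undef them first:

/* Hypothetical arch-provided <asm/smsc911x.h> */
#undef SMSC_INITIALIZE
#define SMSC_INITIALIZE()	board_smsc911x_setup()

#undef smsc_get_mac
#define smsc_get_mac(dev)	board_smsc911x_get_mac(dev)
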
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 630fb8664768..458bb57914a3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1610,6 +1610,8 @@ static void netback_changed(struct xenbus_device *dev,
1610 switch (backend_state) { 1610 switch (backend_state) {
1611 case XenbusStateInitialising: 1611 case XenbusStateInitialising:
1612 case XenbusStateInitialised: 1612 case XenbusStateInitialised:
1613 case XenbusStateReconfiguring:
1614 case XenbusStateReconfigured:
1613 case XenbusStateConnected: 1615 case XenbusStateConnected:
1614 case XenbusStateUnknown: 1616 case XenbusStateUnknown:
1615 case XenbusStateClosed: 1617 case XenbusStateClosed:
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index f3f8be5a35fa..14f0955eca68 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -430,8 +430,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
430 } 430 }
431 431
432 /* Get the protocol type of the ethernet frame that arrived */ 432 /* Get the protocol type of the ethernet frame that arrived */
433 proto_type = ((in_be32(addr + XEL_HEADER_OFFSET + 433 proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
434 XEL_RXBUFF_OFFSET) >> XEL_HEADER_SHIFT) & 434 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
435 XEL_RPLR_LENGTH_MASK); 435 XEL_RPLR_LENGTH_MASK);
436 436
437 /* Check if received ethernet frame is a raw ethernet frame 437 /* Check if received ethernet frame is a raw ethernet frame
@@ -439,9 +439,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
439 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 439 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
440 440
441 if (proto_type == ETH_P_IP) { 441 if (proto_type == ETH_P_IP) {
442 length = ((in_be32(addr + 442 length = ((ntohl(in_be32(addr +
443 XEL_HEADER_IP_LENGTH_OFFSET + 443 XEL_HEADER_IP_LENGTH_OFFSET +
444 XEL_RXBUFF_OFFSET) >> 444 XEL_RXBUFF_OFFSET)) >>
445 XEL_HEADER_SHIFT) & 445 XEL_HEADER_SHIFT) &
446 XEL_RPLR_LENGTH_MASK); 446 XEL_RPLR_LENGTH_MASK);
447 length += ETH_HLEN + ETH_FCS_LEN; 447 length += ETH_HLEN + ETH_FCS_LEN;