author    Krzysztof Hałasa <khc@pm.waw.pl>    2008-12-19 19:53:08 -0500
committer Krzysztof Hałasa <khc@pm.waw.pl>    2008-12-21 18:42:55 -0500
commit    2098c18d6cf65358dd1620154bdedbc8c8d36f44 (patch)
tree      0021ff59e512a67b31cca20894086acff91107d1
parent    b4c7d3b07257528d3c0bfd07c5b38b48beb9b6d1 (diff)
IXP4xx: Add PHYLIB support to Ethernet driver.
Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
-rw-r--r--  drivers/net/arm/Kconfig       |   2
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c  | 194
2 files changed, 96 insertions, 100 deletions
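In phylib terms, the conversion has two visible pieces: the driver now registers a struct mii_bus whose read/write hooks drive the NPE-B MDIO registers, and each port attaches to its PHY with phy_connect(), letting phylib poll the link and report changes through the ixp4xx_adjust_link() callback instead of the old private mdio_thread. The fragment below is a condensed sketch of that pattern drawn from the hunks that follow; eth_attach_phy() is a hypothetical helper name used only for illustration, and locking and cleanup details are trimmed.

/* Condensed sketch of the phylib pattern this patch adopts; not a
 * drop-in replacement for the real hunks below. */
static struct mii_bus *mdio_bus;

static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_bus->name  = "IXP4xx MII Bus";
	mdio_bus->read  = &ixp4xx_mdio_read;	/* wrap the NPE-B MDIO registers */
	mdio_bus->write = &ixp4xx_mdio_write;
	strcpy(mdio_bus->id, "0");		/* bus id later referenced via PHY_ID_FMT */

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

/* eth_attach_phy() is a hypothetical wrapper around the phy_connect()
 * call that the patch adds to eth_init_one(). */
static int eth_attach_phy(struct net_device *dev, struct eth_plat_info *plat)
{
	struct port *port = netdev_priv(dev);
	char phy_id[BUS_ID_SIZE];

	/* "0" is the bus id chosen above; plat->phy is the PHY address */
	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev))
		return PTR_ERR(port->phydev);

	port->phydev->irq = PHY_POLL;	/* poll the PHY rather than use an interrupt */
	return 0;
}

eth_open() then calls phy_start() and eth_close() calls phy_stop(), handing link management to phylib and removing the driver's MDIO_INTERVAL polling work.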
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index abe17762e6f..2895db13bfa 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -59,7 +59,7 @@ config EP93XX_ETH
 config IXP4XX_ETH
 	tristate "Intel IXP4xx Ethernet support"
 	depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
-	select MII
+	select PHYLIB
 	help
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 44679f804be..db44ebbf45c 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,7 +30,7 @@
 #include <linux/etherdevice.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <mach/npe.h>
 #include <mach/qmgr.h>
@@ -59,7 +59,6 @@
 #define NAPI_WEIGHT		16
 #define MDIO_INTERVAL		(3 * HZ)
 #define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
 #define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */
 
 #define NPE_ID(port_id)		((port_id) >> 4)
@@ -164,14 +163,13 @@ struct port {
 	struct npe *npe;
 	struct net_device *netdev;
 	struct napi_struct napi;
-	struct mii_if_info mii;
-	struct delayed_work mdio_thread;
+	struct phy_device *phydev;
 	struct eth_plat_info *plat;
 	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
 	struct desc *desc_tab;	/* coherent */
 	u32 desc_tab_phys;
 	int id;			/* logical port ID */
-	u16 mii_bmcr;
+	int speed, duplex;
 };
 
 /* NPE message structure */
@@ -242,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 
 static spinlock_t mdio_lock;
 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
 static int ports_open;
 static struct port *npe_port_tab[MAX_NPES];
 static struct dma_pool *dma_pool;
 
 
-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
-		    int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			   int write, u16 cmd)
 {
 	int cycles = 0;
 
 	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+		return -1;
 	}
 
 	if (write) {
@@ -273,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
 	}
 
 	if (cycles == MAX_MDIO_RETRIES) {
-		printk(KERN_ERR "%s: MII write failed\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+		       phy_id);
+		return -1;
 	}
 
 #if DEBUG_MDIO
-	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
-	       cycles);
+	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+	       phy_id, write ? "write" : "read", cycles);
 #endif
 
 	if (write)
 		return 0;
 
 	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII read failed\n", dev->name);
-		return 0;
+#if DEBUG_MDIO
+		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+		       phy_id);
+#endif
+		return 0xFFFF; /* don't return error */
 	}
 
 	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
-		(__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
 }
 
-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 {
 	unsigned long flags;
-	u16 val;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	val = mdio_cmd(dev, phy_id, location, 0, 0);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
 	spin_unlock_irqrestore(&mdio_lock, flags);
-	return val;
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+	       phy_id, location, ret);
+#endif
+	return ret;
 }
 
-static void mdio_write(struct net_device *dev, int phy_id, int location,
-		       int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+			     u16 val)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	mdio_cmd(dev, phy_id, location, 1, val);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
 	spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
+	       bus->name, phy_id, location, val, ret);
+#endif
+	return ret;
 }
 
-static void phy_reset(struct net_device *dev, int phy_id)
+static int ixp4xx_mdio_register(void)
 {
-	struct port *port = netdev_priv(dev);
-	int cycles = 0;
+	int err;
+
+	if (!(mdio_bus = mdiobus_alloc()))
+		return -ENOMEM;
 
-	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+	/* All MII PHY accesses use NPE-B Ethernet registers */
+	spin_lock_init(&mdio_lock);
+	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
 
-	while (cycles < MAX_MII_RESET_RETRIES) {
-		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
-			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
-			       dev->name, cycles);
-#endif
-			return;
-		}
-		udelay(1);
-		cycles++;
-	}
+	mdio_bus->name = "IXP4xx MII Bus";
+	mdio_bus->read = &ixp4xx_mdio_read;
+	mdio_bus->write = &ixp4xx_mdio_write;
+	strcpy(mdio_bus->id, "0");
 
-	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+	return err;
 }
 
-static void eth_set_duplex(struct port *port)
+static void ixp4xx_mdio_remove(void)
 {
-	if (port->mii.full_duplex)
-		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
-	else
-		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
 }
 
 
-static void phy_check_media(struct port *port, int init)
+static void ixp4xx_adjust_link(struct net_device *dev)
 {
-	if (mii_check_media(&port->mii, 1, init))
-		eth_set_duplex(port);
-	if (port->mii.force_media) { /* mii_check_media() doesn't work */
-		struct net_device *dev = port->netdev;
-		int cur_link = mii_link_ok(&port->mii);
-		int prev_link = netif_carrier_ok(dev);
-
-		if (!prev_link && cur_link) {
-			printk(KERN_INFO "%s: link up\n", dev->name);
-			netif_carrier_on(dev);
-		} else if (prev_link && !cur_link) {
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
+
+	if (!phydev->link) {
+		if (port->speed) {
+			port->speed = 0;
 			printk(KERN_INFO "%s: link down\n", dev->name);
-			netif_carrier_off(dev);
 		}
+		return;
 	}
-}
 
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+		return;
 
-static void mdio_thread(struct work_struct *work)
-{
-	struct port *port = container_of(work, struct port, mdio_thread.work);
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
 
-	phy_check_media(port, 0);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+	if (port->duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
 }
 
 
@@ -777,16 +788,9 @@ static void eth_set_mcast_list(struct net_device *dev)
 
 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
-	struct port *port = netdev_priv(dev);
-	unsigned int duplex_chg;
-	int err;
-
 	if (!netif_running(dev))
 		return -EINVAL;
-	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
-	if (duplex_chg)
-		eth_set_duplex(port);
-	return err;
+	return -EINVAL;
 }
 
 
@@ -938,8 +942,6 @@ static int eth_open(struct net_device *dev)
 		}
 	}
 
-	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
 	msg.eth_id = port->id;
@@ -977,6 +979,9 @@ static int eth_open(struct net_device *dev)
 		return err;
 	}
 
+	port->speed = 0;	/* force "link up" message */
+	phy_start(port->phydev);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
 	__raw_writel(0x08, &port->regs->random_seed);
@@ -1004,10 +1009,8 @@ static int eth_open(struct net_device *dev)
 	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
 
 	napi_enable(&port->napi);
-	phy_check_media(port, 1);
 	eth_set_mcast_list(dev);
 	netif_start_queue(dev);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
 
 	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
 		     eth_rx_irq, dev);
@@ -1098,14 +1101,10 @@ static int eth_close(struct net_device *dev)
 		printk(KERN_CRIT "%s: unable to disable loopback\n",
 		       dev->name);
 
-	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
-		~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
-	mdio_write(dev, port->plat->phy, MII_BMCR,
-		   port->mii_bmcr | BMCR_PDOWN);
+	phy_stop(port->phydev);
 
 	if (!ports_open)
 		qmgr_disable_irq(TXDONE_QUEUE);
-	cancel_rearming_delayed_work(&port->mdio_thread);
 	destroy_queues(port);
 	release_queues(port);
 	return 0;
@@ -1117,6 +1116,7 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	struct net_device *dev;
 	struct eth_plat_info *plat = pdev->dev.platform_data;
 	u32 regs_phys;
+	char phy_id[BUS_ID_SIZE];
 	int err;
 
 	if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1182,22 +1182,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
 	udelay(50);
 
-	port->mii.dev = dev;
-	port->mii.mdio_read = mdio_read;
-	port->mii.mdio_write = mdio_write;
-	port->mii.phy_id = plat->phy;
-	port->mii.phy_id_mask = 0x1F;
-	port->mii.reg_num_mask = 0x1F;
+	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+				   PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(port->phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(port->phydev);
+	}
+
+	port->phydev->irq = PHY_POLL;
 
 	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
 	       npe_name(port->npe));
 
-	phy_reset(dev, plat->phy);
-	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
-		~(BMCR_RESET | BMCR_PDOWN);
-	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
-	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
 	return 0;
 
 err_unreg:
@@ -1231,20 +1228,19 @@ static struct platform_driver ixp4xx_eth_driver = {
 
 static int __init eth_init_module(void)
 {
+	int err;
 	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
 		return -ENOSYS;
 
-	/* All MII PHY accesses use NPE-B Ethernet registers */
-	spin_lock_init(&mdio_lock);
-	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
-	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
+	if ((err = ixp4xx_mdio_register()))
+		return err;
 	return platform_driver_register(&ixp4xx_eth_driver);
 }
 
 static void __exit eth_cleanup_module(void)
 {
 	platform_driver_unregister(&ixp4xx_eth_driver);
+	ixp4xx_mdio_remove();
 }
 
 MODULE_AUTHOR("Krzysztof Halasa");
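One ordering detail worth noting in the final hunk: eth_init_module() now registers the MDIO bus before registering the platform driver, so the bus (and the PHY devices probed on it) already exist when eth_init_one() calls phy_connect(), and eth_cleanup_module() unwinds in the reverse order. A minimal sketch of that lifecycle, reconstructed from the hunk above with abbreviated error handling:

static int __init eth_init_module(void)
{
	int err;

	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
		return -ENOSYS;

	if ((err = ixp4xx_mdio_register()))	/* MDIO bus and PHYs first... */
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);	/* ...then the ports */
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);	/* ports detach from their PHYs */
	ixp4xx_mdio_remove();				/* then the bus itself goes away */
}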