author    Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-06-18 04:52:36 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-08-12 15:38:26 -0400
commit    9f2f381f813858755f5b6ef7af316feda0726ef3 (patch)
tree      d92fe910d8a8ef2c06ddeeb88ecd786d315f713e /drivers/net/ethernet/cadence
parent    7b35f03338a8557122e62ea1a011f1628b978e8d (diff)
macb: Move the Atmel driver
Move the Atmel driver into drivers/net/ethernet/cadence/ and make the
necessary Kconfig and Makefile changes.

CC: Nicolas Ferre <nicolas.ferre@atmel.com>
CC: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: Jamie Iles <jamie@jamieiles.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Diffstat (limited to 'drivers/net/ethernet/cadence')
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig       |   44
-rw-r--r--  drivers/net/ethernet/cadence/Makefile      |    6
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c  | 1254
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.h  |  109
-rw-r--r--  drivers/net/ethernet/cadence/macb.c        | 1366
-rw-r--r--  drivers/net/ethernet/cadence/macb.h        |  394
6 files changed, 3173 insertions(+), 0 deletions(-)
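Note that the diffstat above is limited to the new directory, so the hunks that wire it into the build are not shown here. For drivers/net/ethernet/cadence/ to actually be built and its options to appear in menuconfig, the parent drivers/net/ethernet/ Kconfig and Makefile must also reference it. A minimal sketch of that hookup (illustrative only; the exact lines and guard symbol used upstream may differ) would be:

    # drivers/net/ethernet/Kconfig (sketch)
    source "drivers/net/ethernet/cadence/Kconfig"

    # drivers/net/ethernet/Makefile (sketch)
    obj-$(CONFIG_NET_ATMEL) += cadence/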
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
new file mode 100644
index 000000000000..c00e706ab58a
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -0,0 +1,44 @@
1#
2# Atmel device configuration
3#
4
5config HAVE_NET_MACB
6 bool
7
8config NET_ATMEL
9 bool "Atmel devices"
10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
11 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y.
13 Make sure you know the name of your card. Read the Ethernet-HOWTO,
14 available from <http://www.tldp.org/docs.html#howto>.
15
16 If unsure, say Y.
17
18 Note that the answer to this question doesn't directly affect the
19 kernel: saying N will just cause the configurator to skip all
20 the remaining Atmel network card questions. If you say Y, you will be
21 asked for your specific card in the following questions.
22
23if NET_ATMEL
24
25config ARM_AT91_ETHER
26 tristate "AT91RM9200 Ethernet support"
27 depends on ARM && ARCH_AT91RM9200
28 select MII
29 ---help---
30 If you wish to compile a kernel for the AT91RM9200 and enable
31 ethernet support, then you should always answer Y to this.
32
33config MACB
34 tristate "Atmel MACB support"
35 depends on HAVE_NET_MACB
36 select PHYLIB
37 ---help---
38 The Atmel MACB ethernet interface is found on many AT32 and AT91
39 parts. Say Y to include support for the MACB chip.
40
41 To compile this driver as a module, choose M here: the module
42 will be called macb.
43
44endif # NET_ATMEL
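As a usage example, a board whose platform code selects HAVE_NET_MACB and wants the MACB driver built as a module would end up with a .config fragment along these lines (illustrative values, not part of this patch):

    CONFIG_NET_ATMEL=y
    CONFIG_MACB=m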
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
new file mode 100644
index 000000000000..9068b8331ed1
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the Atmel network device drivers.
3#
4
5obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
6obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
new file mode 100644
index 000000000000..29dc43523cec
--- /dev/null
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -0,0 +1,1254 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) 2003 SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson 01/11/2003
8 *
9 * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
10 * (Polaroid Corporation)
11 *
12 * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/mii.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/dma-mapping.h>
28#include <linux/ethtool.h>
29#include <linux/platform_device.h>
30#include <linux/clk.h>
31#include <linux/gfp.h>
32
33#include <asm/io.h>
34#include <asm/uaccess.h>
35#include <asm/mach-types.h>
36
37#include <mach/at91rm9200_emac.h>
38#include <mach/gpio.h>
39#include <mach/board.h>
40
41#include "at91_ether.h"
42
43#define DRV_NAME "at91_ether"
44#define DRV_VERSION "1.0"
45
46#define LINK_POLL_INTERVAL (HZ)
47
48/* ..................................................................... */
49
50/*
51 * Read from an EMAC register.
52 */
53static inline unsigned long at91_emac_read(unsigned int reg)
54{
55 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
56
57 return __raw_readl(emac_base + reg);
58}
59
60/*
61 * Write to an EMAC register.
62 */
63static inline void at91_emac_write(unsigned int reg, unsigned long value)
64{
65 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
66
67 __raw_writel(value, emac_base + reg);
68}
69
70/* ........................... PHY INTERFACE ........................... */
71
72/*
73 * Enable the MDIO bit in MAC control register
74 * When not called from an interrupt-handler, access to the PHY must be
75 * protected by a spinlock.
76 */
77static void enable_mdi(void)
78{
79 unsigned long ctl;
80
81 ctl = at91_emac_read(AT91_EMAC_CTL);
82 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
83}
84
85/*
86 * Disable the MDIO bit in the MAC control register
87 */
88static void disable_mdi(void)
89{
90 unsigned long ctl;
91
92 ctl = at91_emac_read(AT91_EMAC_CTL);
93 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
94}
95
96/*
97 * Wait until the PHY operation is complete.
98 */
99static inline void at91_phy_wait(void) {
100 unsigned long timeout = jiffies + 2;
101
102 while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
103 if (time_after(jiffies, timeout)) {
104 printk("at91_ether: MIO timeout\n");
105 break;
106 }
107 cpu_relax();
108 }
109}
110
111/*
112 * Write value to a PHY register
113 * Note: MDI interface is assumed to already have been enabled.
114 */
115static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value)
116{
117 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
118 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
119
120 /* Wait until IDLE bit in Network Status register is cleared */
121 at91_phy_wait();
122}
123
124/*
125 * Read value stored in a PHY register.
126 * Note: MDI interface is assumed to already have been enabled.
127 */
128static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value)
129{
130 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
131 | ((phy_addr & 0x1f) << 23) | (address << 18));
132
133 /* Wait until IDLE bit in Network Status register is cleared */
134 at91_phy_wait();
135
136 *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA;
137}
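The word written to AT91_EMAC_MAN above is an IEEE 802.3 clause-22 style management frame: AT91_EMAC_MAN_802_3 supplies the start-of-frame and code bits, AT91_EMAC_RW_R/AT91_EMAC_RW_W select the operation, the 5-bit PHY address sits in bits 27:23, the 5-bit register address in bits 22:18, and the 16-bit data in the low half (masked by AT91_EMAC_DATA). A hypothetical usage sketch inside this driver (lp->lock held and enable_mdi() already called, exactly as the real callers below do) would be:

    /* Illustrative only, not part of the patch: read the PHY's basic
     * mode status register and test the link bit. */
    unsigned int bmsr;

    read_phy(lp->phy_address, MII_BMSR, &bmsr);
    if (bmsr & BMSR_LSTATUS)
            printk(KERN_DEBUG "at91_ether: link is up\n");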
138
139/* ........................... PHY MANAGEMENT .......................... */
140
141/*
142 * Access the PHY to determine the current link speed and mode, and update the
143 * MAC accordingly.
144 * If there is no link, or auto-negotiation is still in progress, no changes are made.
145 */
146static void update_linkspeed(struct net_device *dev, int silent)
147{
148 struct at91_private *lp = netdev_priv(dev);
149 unsigned int bmsr, bmcr, lpa, mac_cfg;
150 unsigned int speed, duplex;
151
152 if (!mii_link_ok(&lp->mii)) { /* no link */
153 netif_carrier_off(dev);
154 if (!silent)
155 printk(KERN_INFO "%s: Link down.\n", dev->name);
156 return;
157 }
158
159 /* Link up, or auto-negotiation still in progress */
160 read_phy(lp->phy_address, MII_BMSR, &bmsr);
161 read_phy(lp->phy_address, MII_BMCR, &bmcr);
162 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
163 if (!(bmsr & BMSR_ANEGCOMPLETE))
164 return; /* Do nothing - another interrupt generated when negotiation complete */
165
166 read_phy(lp->phy_address, MII_LPA, &lpa);
167 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
168 else speed = SPEED_10;
169 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
170 else duplex = DUPLEX_HALF;
171 } else {
172 speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
173 duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
174 }
175
176 /* Update the MAC */
177 mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
178 if (speed == SPEED_100) {
179 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
180 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
181 else /* 100 Half Duplex */
182 mac_cfg |= AT91_EMAC_SPD;
183 } else {
184 if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
185 mac_cfg |= AT91_EMAC_FD;
186 else {} /* 10 Half Duplex */
187 }
188 at91_emac_write(AT91_EMAC_CFG, mac_cfg);
189
190 if (!silent)
191 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
192 netif_carrier_on(dev);
193}
194
195/*
196 * Handle interrupts from the PHY
197 */
198static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
199{
200 struct net_device *dev = (struct net_device *) dev_id;
201 struct at91_private *lp = netdev_priv(dev);
202 unsigned int phy;
203
204 /*
205 * This handler is triggered on both edges, but the PHY chips expect
206 * level-triggering. We therefore have to check if the PHY actually has
207 * an IRQ pending.
208 */
209 enable_mdi();
210 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
211 read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
212 if (!(phy & (1 << 0)))
213 goto done;
214 }
215 else if (lp->phy_type == MII_LXT971A_ID) {
216 read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
217 if (!(phy & (1 << 2)))
218 goto done;
219 }
220 else if (lp->phy_type == MII_BCM5221_ID) {
221 read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
222 if (!(phy & (1 << 0)))
223 goto done;
224 }
225 else if (lp->phy_type == MII_KS8721_ID) {
226 read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
227 if (!(phy & ((1 << 2) | 1)))
228 goto done;
229 }
230 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
231 read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy);
232 if (!(phy & ((1 << 2) | 1)))
233 goto done;
234 }
235 else if (lp->phy_type == MII_DP83848_ID) {
236 read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
237 if (!(phy & (1 << 7)))
238 goto done;
239 }
240
241 update_linkspeed(dev, 0);
242
243done:
244 disable_mdi();
245
246 return IRQ_HANDLED;
247}
248
249/*
250 * Initialize and enable the PHY interrupt for link-state changes
251 */
252static void enable_phyirq(struct net_device *dev)
253{
254 struct at91_private *lp = netdev_priv(dev);
255 unsigned int dsintr, irq_number;
256 int status;
257
258 irq_number = lp->board_data.phy_irq_pin;
259 if (!irq_number) {
260 /*
261 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
262 * or board does not have it connected.
263 */
264 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
265 return;
266 }
267
268 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
269 if (status) {
270 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
271 return;
272 }
273
274 spin_lock_irq(&lp->lock);
275 enable_mdi();
276
277 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
278 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
279 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
280 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
281 }
282 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
283 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
284 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
285 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
286 }
287 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
288 dsintr = (1 << 15) | ( 1 << 14);
289 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
290 }
291 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
292 dsintr = (1 << 10) | ( 1 << 8);
293 write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
294 }
295 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
296 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
297 dsintr = dsintr | 0x500; /* set bits 8, 10 */
298 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
299 }
300 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
301 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
302 dsintr = dsintr | 0x3c; /* set bits 2..5 */
303 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
304 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
305 dsintr = dsintr | 0x3; /* set bits 0,1 */
306 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
307 }
308
309 disable_mdi();
310 spin_unlock_irq(&lp->lock);
311}
312
313/*
314 * Disable the PHY interrupt
315 */
316static void disable_phyirq(struct net_device *dev)
317{
318 struct at91_private *lp = netdev_priv(dev);
319 unsigned int dsintr;
320 unsigned int irq_number;
321
322 irq_number = lp->board_data.phy_irq_pin;
323 if (!irq_number) {
324 del_timer_sync(&lp->check_timer);
325 return;
326 }
327
328 spin_lock_irq(&lp->lock);
329 enable_mdi();
330
331 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
332 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
333 dsintr = dsintr | 0xf00; /* set bits 8..11 */
334 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
335 }
336 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
337 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
338 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
339 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
340 }
341 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
342 read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr);
343 dsintr = ~(1 << 14);
344 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
345 }
346 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
347 read_phy(lp->phy_address, MII_TPISTATUS, &dsintr);
348 dsintr = ~((1 << 10) | (1 << 8));
349 write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
350 }
351 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
352 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
353 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
354 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
355 }
356 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
357 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
358 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
359 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
360 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
361 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
362 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
363 }
364
365 disable_mdi();
366 spin_unlock_irq(&lp->lock);
367
368 free_irq(irq_number, dev); /* Free interrupt handler */
369}
370
371/*
372 * Perform a software reset of the PHY.
373 */
374#if 0
375static void reset_phy(struct net_device *dev)
376{
377 struct at91_private *lp = netdev_priv(dev);
378 unsigned int bmcr;
379
380 spin_lock_irq(&lp->lock);
381 enable_mdi();
382
383 /* Perform PHY reset */
384 write_phy(lp->phy_address, MII_BMCR, BMCR_RESET);
385
386 /* Wait until PHY reset is complete */
387 do {
388 read_phy(lp->phy_address, MII_BMCR, &bmcr);
389 } while (!(bmcr & BMCR_RESET));
390
391 disable_mdi();
392 spin_unlock_irq(&lp->lock);
393}
394#endif
395
396static void at91ether_check_link(unsigned long dev_id)
397{
398 struct net_device *dev = (struct net_device *) dev_id;
399 struct at91_private *lp = netdev_priv(dev);
400
401 enable_mdi();
402 update_linkspeed(dev, 1);
403 disable_mdi();
404
405 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
406}
407
408/* ......................... ADDRESS MANAGEMENT ........................ */
409
410/*
411 * NOTE: Your bootloader must always set the MAC address correctly before
412 * booting into Linux.
413 *
414 * - It must always set the MAC address after reset, even if it doesn't
415 * happen to access the Ethernet while it's booting. Some versions of
416 * U-Boot on the AT91RM9200-DK do not do this.
417 *
418 * - Likewise it must store the addresses in the correct byte order.
419 * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
420 * continues to do so, for bug-compatibility).
421 */
422
423static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
424{
425 char addr[6];
426
427 if (machine_is_csb337()) {
428 addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
429 addr[4] = (lo & 0xff00) >> 8;
430 addr[3] = (lo & 0xff0000) >> 16;
431 addr[2] = (lo & 0xff000000) >> 24;
432 addr[1] = (hi & 0xff);
433 addr[0] = (hi & 0xff00) >> 8;
434 }
435 else {
436 addr[0] = (lo & 0xff);
437 addr[1] = (lo & 0xff00) >> 8;
438 addr[2] = (lo & 0xff0000) >> 16;
439 addr[3] = (lo & 0xff000000) >> 24;
440 addr[4] = (hi & 0xff);
441 addr[5] = (hi & 0xff00) >> 8;
442 }
443
444 if (is_valid_ether_addr(addr)) {
445 memcpy(dev->dev_addr, &addr, 6);
446 return 1;
447 }
448 return 0;
449}
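As a worked example (values illustrative, not from the patch): if the bootloader programmed the address 00:11:22:33:44:55 in the normal byte order, the hardware registers read SA1L = 0x33221100 and SA1H = 0x00005544, and the else-branch above reassembles addr[0..5] in exactly that order; on the CSB337 the same address is stored byte-reversed by uMon, which the machine_is_csb337() branch undoes.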
450
451/*
452 * Set the ethernet MAC address in dev->dev_addr
453 */
454static void __init get_mac_address(struct net_device *dev)
455{
456 /* Check Specific-Address 1 */
457 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L)))
458 return;
459 /* Check Specific-Address 2 */
460 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L)))
461 return;
462 /* Check Specific-Address 3 */
463 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L)))
464 return;
465 /* Check Specific-Address 4 */
466 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L)))
467 return;
468
469 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
470}
471
472/*
473 * Program the hardware MAC address from dev->dev_addr.
474 */
475static void update_mac_address(struct net_device *dev)
476{
477 at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
478 at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
479
480 at91_emac_write(AT91_EMAC_SA2L, 0);
481 at91_emac_write(AT91_EMAC_SA2H, 0);
482}
483
484/*
485 * Store the new hardware address in dev->dev_addr, and update the MAC.
486 */
487static int set_mac_address(struct net_device *dev, void* addr)
488{
489 struct sockaddr *address = addr;
490
491 if (!is_valid_ether_addr(address->sa_data))
492 return -EADDRNOTAVAIL;
493
494 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
495 update_mac_address(dev);
496
497 printk("%s: Setting MAC address to %pM\n", dev->name,
498 dev->dev_addr);
499
500 return 0;
501}
502
503static int inline hash_bit_value(int bitnr, __u8 *addr)
504{
505 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
506 return 1;
507 return 0;
508}
509
510/*
511 * The hash address register is 64 bits long and takes up two locations in the memory map.
512 * The least significant bits are stored in EMAC_HSL and the most significant
513 * bits in EMAC_HSH.
514 *
515 * The unicast hash enable and the multicast hash enable bits in the network configuration
516 * register enable the reception of hash matched frames. The destination address is
517 * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
518 * The hash function is an exclusive or of every sixth bit of the destination address.
519 * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
520 * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
521 * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
522 * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
523 * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
524 * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
525 * da[0] represents the least significant bit of the first byte received, that is, the multicast/
526 * unicast indicator, and da[47] represents the most significant bit of the last byte
527 * received.
528 * If the hash index points to a bit that is set in the hash register then the frame will be
529 * matched according to whether the frame is multicast or unicast.
530 * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
531 * the hash index points to a bit set in the hash register.
532 * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
533 * hash index points to a bit set in the hash register.
534 * To receive all multicast frames, the hash register should be set with all ones and the
535 * multicast hash enable bit should be set in the network configuration register.
536 */
537
538/*
539 * Return the hash index value for the specified address.
540 */
541static int hash_get_index(__u8 *addr)
542{
543 int i, j, bitval;
544 int hash_index = 0;
545
546 for (j = 0; j < 6; j++) {
547 for (i = 0, bitval = 0; i < 8; i++)
548 bitval ^= hash_bit_value(i*6 + j, addr);
549
550 hash_index |= (bitval << j);
551 }
552
553 return hash_index;
554}
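A worked example may help here (illustrative, not part of the patch): for the IPv4 all-hosts multicast address 01:00:5e:00:00:01, XOR-ing every sixth bit of the destination address as described in the comment above gives an index of 38, so at91ether_sethashtable() sets bit 38 & 31 = 6 in mc_filter[1], i.e. in the EMAC_HSH half of the hash table:

    /* Worked example: 01:00:5e:00:00:01 -> hash_get_index() == 38 */
    __u8 allhosts[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    int idx = hash_get_index(allhosts);    /* idx == 38, bit 6 of EMAC_HSH */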
555
556/*
557 * Add multicast addresses to the internal multicast-hash table.
558 */
559static void at91ether_sethashtable(struct net_device *dev)
560{
561 struct netdev_hw_addr *ha;
562 unsigned long mc_filter[2];
563 unsigned int bitnr;
564
565 mc_filter[0] = mc_filter[1] = 0;
566
567 netdev_for_each_mc_addr(ha, dev) {
568 bitnr = hash_get_index(ha->addr);
569 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
570 }
571
572 at91_emac_write(AT91_EMAC_HSL, mc_filter[0]);
573 at91_emac_write(AT91_EMAC_HSH, mc_filter[1]);
574}
575
576/*
577 * Enable/Disable promiscuous and multicast modes.
578 */
579static void at91ether_set_multicast_list(struct net_device *dev)
580{
581 unsigned long cfg;
582
583 cfg = at91_emac_read(AT91_EMAC_CFG);
584
585 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
586 cfg |= AT91_EMAC_CAF;
587 else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
588 cfg &= ~AT91_EMAC_CAF;
589
590 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
591 at91_emac_write(AT91_EMAC_HSH, -1);
592 at91_emac_write(AT91_EMAC_HSL, -1);
593 cfg |= AT91_EMAC_MTI;
594 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
595 at91ether_sethashtable(dev);
596 cfg |= AT91_EMAC_MTI;
597 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
598 at91_emac_write(AT91_EMAC_HSH, 0);
599 at91_emac_write(AT91_EMAC_HSL, 0);
600 cfg &= ~AT91_EMAC_MTI;
601 }
602
603 at91_emac_write(AT91_EMAC_CFG, cfg);
604}
605
606/* ......................... ETHTOOL SUPPORT ........................... */
607
608static int mdio_read(struct net_device *dev, int phy_id, int location)
609{
610 unsigned int value;
611
612 read_phy(phy_id, location, &value);
613 return value;
614}
615
616static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
617{
618 write_phy(phy_id, location, value);
619}
620
621static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
622{
623 struct at91_private *lp = netdev_priv(dev);
624 int ret;
625
626 spin_lock_irq(&lp->lock);
627 enable_mdi();
628
629 ret = mii_ethtool_gset(&lp->mii, cmd);
630
631 disable_mdi();
632 spin_unlock_irq(&lp->lock);
633
634 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
635 cmd->supported = SUPPORTED_FIBRE;
636 cmd->port = PORT_FIBRE;
637 }
638
639 return ret;
640}
641
642static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
643{
644 struct at91_private *lp = netdev_priv(dev);
645 int ret;
646
647 spin_lock_irq(&lp->lock);
648 enable_mdi();
649
650 ret = mii_ethtool_sset(&lp->mii, cmd);
651
652 disable_mdi();
653 spin_unlock_irq(&lp->lock);
654
655 return ret;
656}
657
658static int at91ether_nwayreset(struct net_device *dev)
659{
660 struct at91_private *lp = netdev_priv(dev);
661 int ret;
662
663 spin_lock_irq(&lp->lock);
664 enable_mdi();
665
666 ret = mii_nway_restart(&lp->mii);
667
668 disable_mdi();
669 spin_unlock_irq(&lp->lock);
670
671 return ret;
672}
673
674static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
675{
676 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
677 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
678 strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
679}
680
681static const struct ethtool_ops at91ether_ethtool_ops = {
682 .get_settings = at91ether_get_settings,
683 .set_settings = at91ether_set_settings,
684 .get_drvinfo = at91ether_get_drvinfo,
685 .nway_reset = at91ether_nwayreset,
686 .get_link = ethtool_op_get_link,
687};
688
689static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
690{
691 struct at91_private *lp = netdev_priv(dev);
692 int res;
693
694 if (!netif_running(dev))
695 return -EINVAL;
696
697 spin_lock_irq(&lp->lock);
698 enable_mdi();
699 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
700 disable_mdi();
701 spin_unlock_irq(&lp->lock);
702
703 return res;
704}
705
706/* ................................ MAC ................................ */
707
708/*
709 * Initialize and start the Receiver and Transmit subsystems
710 */
711static void at91ether_start(struct net_device *dev)
712{
713 struct at91_private *lp = netdev_priv(dev);
714 struct recv_desc_bufs *dlist, *dlist_phys;
715 int i;
716 unsigned long ctl;
717
718 dlist = lp->dlist;
719 dlist_phys = lp->dlist_phys;
720
721 for (i = 0; i < MAX_RX_DESCR; i++) {
722 dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
723 dlist->descriptors[i].size = 0;
724 }
725
726 /* Set the Wrap bit on the last descriptor */
727 dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
728
729 /* Reset buffer index */
730 lp->rxBuffIndex = 0;
731
732 /* Program address of descriptor list in Rx Buffer Queue register */
733 at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys);
734
735 /* Enable Receive and Transmit */
736 ctl = at91_emac_read(AT91_EMAC_CTL);
737 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
738}
739
740/*
741 * Open the ethernet interface
742 */
743static int at91ether_open(struct net_device *dev)
744{
745 struct at91_private *lp = netdev_priv(dev);
746 unsigned long ctl;
747
748 if (!is_valid_ether_addr(dev->dev_addr))
749 return -EADDRNOTAVAIL;
750
751 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
752
753 /* Clear internal statistics */
754 ctl = at91_emac_read(AT91_EMAC_CTL);
755 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
756
757 /* Update the MAC address (in case the user has changed it) */
758 update_mac_address(dev);
759
760 /* Enable PHY interrupt */
761 enable_phyirq(dev);
762
763 /* Enable MAC interrupts */
764 at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
765 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
766 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
767
768 /* Determine current link speed */
769 spin_lock_irq(&lp->lock);
770 enable_mdi();
771 update_linkspeed(dev, 0);
772 disable_mdi();
773 spin_unlock_irq(&lp->lock);
774
775 at91ether_start(dev);
776 netif_start_queue(dev);
777 return 0;
778}
779
780/*
781 * Close the interface
782 */
783static int at91ether_close(struct net_device *dev)
784{
785 struct at91_private *lp = netdev_priv(dev);
786 unsigned long ctl;
787
788 /* Disable Receiver and Transmitter */
789 ctl = at91_emac_read(AT91_EMAC_CTL);
790 at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
791
792 /* Disable PHY interrupt */
793 disable_phyirq(dev);
794
795 /* Disable MAC interrupts */
796 at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
797 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
798 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
799
800 netif_stop_queue(dev);
801
802 clk_disable(lp->ether_clk); /* Disable Peripheral clock */
803
804 return 0;
805}
806
807/*
808 * Transmit packet.
809 */
810static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
811{
812 struct at91_private *lp = netdev_priv(dev);
813
814 if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
815 netif_stop_queue(dev);
816
817 /* Store packet information (to free when Tx completed) */
818 lp->skb = skb;
819 lp->skb_length = skb->len;
820 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
821 dev->stats.tx_bytes += skb->len;
822
823 /* Set address of the data in the Transmit Address register */
824 at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
825 /* Set length of the packet in the Transmit Control register */
826 at91_emac_write(AT91_EMAC_TCR, skb->len);
827
828 } else {
829 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
830 return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
831 on this skb, he also reports -ENETDOWN and printk's, so either
832 we free and return(0) or don't free and return 1 */
833 }
834
835 return NETDEV_TX_OK;
836}
837
838/*
839 * Update the current statistics from the internal statistics registers.
840 */
841static struct net_device_stats *at91ether_stats(struct net_device *dev)
842{
843 int ale, lenerr, seqe, lcol, ecol;
844
845 if (netif_running(dev)) {
846 dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
847 ale = at91_emac_read(AT91_EMAC_ALE);
848 dev->stats.rx_frame_errors += ale; /* Alignment errors */
849 lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
850 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
851 seqe = at91_emac_read(AT91_EMAC_SEQE);
852 dev->stats.rx_crc_errors += seqe; /* CRC error */
853 dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
854 dev->stats.rx_errors += (ale + lenerr + seqe
855 + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
856
857 dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
858 dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
859 dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
860 dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
861
862 lcol = at91_emac_read(AT91_EMAC_LCOL);
863 ecol = at91_emac_read(AT91_EMAC_ECOL);
864 dev->stats.tx_window_errors += lcol; /* Late collisions */
865 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
866
867 dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
868 }
869 return &dev->stats;
870}
871
872/*
873 * Extract received frames from the buffer descriptors and send them to the upper layers.
874 * (Called from interrupt context)
875 */
876static void at91ether_rx(struct net_device *dev)
877{
878 struct at91_private *lp = netdev_priv(dev);
879 struct recv_desc_bufs *dlist;
880 unsigned char *p_recv;
881 struct sk_buff *skb;
882 unsigned int pktlen;
883
884 dlist = lp->dlist;
885 while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
886 p_recv = dlist->recv_buf[lp->rxBuffIndex];
887 pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
888 skb = dev_alloc_skb(pktlen + 2);
889 if (skb != NULL) {
890 skb_reserve(skb, 2);
891 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
892
893 skb->protocol = eth_type_trans(skb, dev);
894 dev->stats.rx_bytes += pktlen;
895 netif_rx(skb);
896 }
897 else {
898 dev->stats.rx_dropped += 1;
899 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
900 }
901
902 if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
903 dev->stats.multicast++;
904
905 dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
906 if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
907 lp->rxBuffIndex = 0;
908 else
909 lp->rxBuffIndex++;
910 }
911}
912
913/*
914 * MAC interrupt handler
915 */
916static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
917{
918 struct net_device *dev = (struct net_device *) dev_id;
919 struct at91_private *lp = netdev_priv(dev);
920 unsigned long intstatus, ctl;
921
922 /* MAC Interrupt Status register indicates what interrupts are pending.
923 It is automatically cleared once read. */
924 intstatus = at91_emac_read(AT91_EMAC_ISR);
925
926 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
927 at91ether_rx(dev);
928
929 if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
930 /* The TCOM bit is set even if the transmission failed. */
931 if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
932 dev->stats.tx_errors += 1;
933
934 if (lp->skb) {
935 dev_kfree_skb_irq(lp->skb);
936 lp->skb = NULL;
937 dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
938 }
939 netif_wake_queue(dev);
940 }
941
942 /* Work-around for Errata #11 */
943 if (intstatus & AT91_EMAC_RBNA) {
944 ctl = at91_emac_read(AT91_EMAC_CTL);
945 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
946 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
947 }
948
949 if (intstatus & AT91_EMAC_ROVR)
950 printk("%s: ROVR error\n", dev->name);
951
952 return IRQ_HANDLED;
953}
954
955#ifdef CONFIG_NET_POLL_CONTROLLER
956static void at91ether_poll_controller(struct net_device *dev)
957{
958 unsigned long flags;
959
960 local_irq_save(flags);
961 at91ether_interrupt(dev->irq, dev);
962 local_irq_restore(flags);
963}
964#endif
965
966static const struct net_device_ops at91ether_netdev_ops = {
967 .ndo_open = at91ether_open,
968 .ndo_stop = at91ether_close,
969 .ndo_start_xmit = at91ether_start_xmit,
970 .ndo_get_stats = at91ether_stats,
971 .ndo_set_multicast_list = at91ether_set_multicast_list,
972 .ndo_set_mac_address = set_mac_address,
973 .ndo_do_ioctl = at91ether_ioctl,
974 .ndo_validate_addr = eth_validate_addr,
975 .ndo_change_mtu = eth_change_mtu,
976#ifdef CONFIG_NET_POLL_CONTROLLER
977 .ndo_poll_controller = at91ether_poll_controller,
978#endif
979};
980
981/*
982 * Initialize the ethernet interface
983 */
984static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
985 struct platform_device *pdev, struct clk *ether_clk)
986{
987 struct at91_eth_data *board_data = pdev->dev.platform_data;
988 struct net_device *dev;
989 struct at91_private *lp;
990 unsigned int val;
991 int res;
992
993 dev = alloc_etherdev(sizeof(struct at91_private));
994 if (!dev)
995 return -ENOMEM;
996
997 dev->base_addr = AT91_VA_BASE_EMAC;
998 dev->irq = AT91RM9200_ID_EMAC;
999
1000 /* Install the interrupt handler */
1001 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
1002 free_netdev(dev);
1003 return -EBUSY;
1004 }
1005
1006 /* Allocate memory for DMA Receive descriptors */
1007 lp = netdev_priv(dev);
1008 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1009 if (lp->dlist == NULL) {
1010 free_irq(dev->irq, dev);
1011 free_netdev(dev);
1012 return -ENOMEM;
1013 }
1014 lp->board_data = *board_data;
1015 lp->ether_clk = ether_clk;
1016 platform_set_drvdata(pdev, dev);
1017
1018 spin_lock_init(&lp->lock);
1019
1020 ether_setup(dev);
1021 dev->netdev_ops = &at91ether_netdev_ops;
1022 dev->ethtool_ops = &at91ether_ethtool_ops;
1023
1024 SET_NETDEV_DEV(dev, &pdev->dev);
1025
1026 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
1027 update_mac_address(dev); /* Program ethernet address into MAC */
1028
1029 at91_emac_write(AT91_EMAC_CTL, 0);
1030
1031 if (lp->board_data.is_rmii)
1032 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
1033 else
1034 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
1035
1036 /* Perform PHY-specific initialization */
1037 spin_lock_irq(&lp->lock);
1038 enable_mdi();
1039 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
1040 read_phy(phy_address, MII_DSCR_REG, &val);
1041 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
1042 lp->phy_media = PORT_FIBRE;
1043 } else if (machine_is_csb337()) {
1044 /* mix link activity status into LED2 link state */
1045 write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
1046 } else if (machine_is_ecbat91())
1047 write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
1048
1049 disable_mdi();
1050 spin_unlock_irq(&lp->lock);
1051
1052 lp->mii.dev = dev; /* Support for ethtool */
1053 lp->mii.mdio_read = mdio_read;
1054 lp->mii.mdio_write = mdio_write;
1055 lp->mii.phy_id = phy_address;
1056 lp->mii.phy_id_mask = 0x1f;
1057 lp->mii.reg_num_mask = 0x1f;
1058
1059 lp->phy_type = phy_type; /* Type of PHY connected */
1060 lp->phy_address = phy_address; /* MDI address of PHY */
1061
1062 /* Register the network interface */
1063 res = register_netdev(dev);
1064 if (res) {
1065 free_irq(dev->irq, dev);
1066 free_netdev(dev);
1067 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1068 return res;
1069 }
1070
1071 /* Determine current link speed */
1072 spin_lock_irq(&lp->lock);
1073 enable_mdi();
1074 update_linkspeed(dev, 0);
1075 disable_mdi();
1076 spin_unlock_irq(&lp->lock);
1077 netif_carrier_off(dev); /* will be enabled in open() */
1078
1079 /* If board has no PHY IRQ, use a timer to poll the PHY */
1080 if (!lp->board_data.phy_irq_pin) {
1081 init_timer(&lp->check_timer);
1082 lp->check_timer.data = (unsigned long)dev;
1083 lp->check_timer.function = at91ether_check_link;
1084 } else if (lp->board_data.phy_irq_pin >= 32)
1085 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1086
1087 /* Display ethernet banner */
1088 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
1089 dev->name, (uint) dev->base_addr, dev->irq,
1090 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1091 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1092 dev->dev_addr);
1093 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1094 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1095 else if (phy_type == MII_LXT971A_ID)
1096 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1097 else if (phy_type == MII_RTL8201_ID)
1098 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1099 else if (phy_type == MII_BCM5221_ID)
1100 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1101 else if (phy_type == MII_DP83847_ID)
1102 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1103 else if (phy_type == MII_DP83848_ID)
1104 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1105 else if (phy_type == MII_AC101L_ID)
1106 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1107 else if (phy_type == MII_KS8721_ID)
1108 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1109 else if (phy_type == MII_T78Q21x3_ID)
1110 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1111 else if (phy_type == MII_LAN83C185_ID)
1112 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1113
1114 return 0;
1115}
1116
1117/*
1118 * Detect MAC and PHY and perform initialization
1119 */
1120static int __init at91ether_probe(struct platform_device *pdev)
1121{
1122 unsigned int phyid1, phyid2;
1123 int detected = -1;
1124 unsigned long phy_id;
1125 unsigned short phy_address = 0;
1126 struct clk *ether_clk;
1127
1128 ether_clk = clk_get(&pdev->dev, "ether_clk");
1129 if (IS_ERR(ether_clk)) {
1130 printk(KERN_ERR "at91_ether: no clock defined\n");
1131 return -ENODEV;
1132 }
1133 clk_enable(ether_clk); /* Enable Peripheral clock */
1134
1135 while ((detected != 0) && (phy_address < 32)) {
1136 /* Read the PHY ID registers */
1137 enable_mdi();
1138 read_phy(phy_address, MII_PHYSID1, &phyid1);
1139 read_phy(phy_address, MII_PHYSID2, &phyid2);
1140 disable_mdi();
1141
1142 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1143 switch (phy_id) {
1144 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1145 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1146 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1147 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1148 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1149 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1150 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1151 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1152 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1153 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1154 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1155 detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
1156 break;
1157 }
1158
1159 phy_address++;
1160 }
1161
1162 clk_disable(ether_clk); /* Disable Peripheral clock */
1163
1164 return detected;
1165}
1166
1167static int __devexit at91ether_remove(struct platform_device *pdev)
1168{
1169 struct net_device *dev = platform_get_drvdata(pdev);
1170 struct at91_private *lp = netdev_priv(dev);
1171
1172 if (lp->board_data.phy_irq_pin >= 32)
1173 gpio_free(lp->board_data.phy_irq_pin);
1174
1175 unregister_netdev(dev);
1176 free_irq(dev->irq, dev);
1177 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1178 clk_put(lp->ether_clk);
1179
1180 platform_set_drvdata(pdev, NULL);
1181 free_netdev(dev);
1182 return 0;
1183}
1184
1185#ifdef CONFIG_PM
1186
1187static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1188{
1189 struct net_device *net_dev = platform_get_drvdata(pdev);
1190 struct at91_private *lp = netdev_priv(net_dev);
1191 int phy_irq = lp->board_data.phy_irq_pin;
1192
1193 if (netif_running(net_dev)) {
1194 if (phy_irq)
1195 disable_irq(phy_irq);
1196
1197 netif_stop_queue(net_dev);
1198 netif_device_detach(net_dev);
1199
1200 clk_disable(lp->ether_clk);
1201 }
1202 return 0;
1203}
1204
1205static int at91ether_resume(struct platform_device *pdev)
1206{
1207 struct net_device *net_dev = platform_get_drvdata(pdev);
1208 struct at91_private *lp = netdev_priv(net_dev);
1209 int phy_irq = lp->board_data.phy_irq_pin;
1210
1211 if (netif_running(net_dev)) {
1212 clk_enable(lp->ether_clk);
1213
1214 netif_device_attach(net_dev);
1215 netif_start_queue(net_dev);
1216
1217 if (phy_irq)
1218 enable_irq(phy_irq);
1219 }
1220 return 0;
1221}
1222
1223#else
1224#define at91ether_suspend NULL
1225#define at91ether_resume NULL
1226#endif
1227
1228static struct platform_driver at91ether_driver = {
1229 .remove = __devexit_p(at91ether_remove),
1230 .suspend = at91ether_suspend,
1231 .resume = at91ether_resume,
1232 .driver = {
1233 .name = DRV_NAME,
1234 .owner = THIS_MODULE,
1235 },
1236};
1237
1238static int __init at91ether_init(void)
1239{
1240 return platform_driver_probe(&at91ether_driver, at91ether_probe);
1241}
1242
1243static void __exit at91ether_exit(void)
1244{
1245 platform_driver_unregister(&at91ether_driver);
1246}
1247
1248module_init(at91ether_init)
1249module_exit(at91ether_exit)
1250
1251MODULE_LICENSE("GPL");
1252MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
1253MODULE_AUTHOR("Andrew Victor");
1254MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
new file mode 100644
index 000000000000..353f4dab62be
--- /dev/null
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -0,0 +1,109 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef AT91_ETHERNET
16#define AT91_ETHERNET
17
18
19/* Davicom 9161 PHY */
20#define MII_DM9161_ID 0x0181b880
21#define MII_DM9161A_ID 0x0181b8a0
22#define MII_DSCR_REG 16
23#define MII_DSCSR_REG 17
24#define MII_DSINTR_REG 21
25
26/* Intel LXT971A PHY */
27#define MII_LXT971A_ID 0x001378E0
28#define MII_ISINTE_REG 18
29#define MII_ISINTS_REG 19
30#define MII_LEDCTRL_REG 20
31
32/* Realtek RTL8201 PHY */
33#define MII_RTL8201_ID 0x00008200
34
35/* Broadcom BCM5221 PHY */
36#define MII_BCM5221_ID 0x004061e0
37#define MII_BCMINTR_REG 26
38
39/* National Semiconductor DP83847 */
40#define MII_DP83847_ID 0x20005c30
41
42/* National Semiconductor DP83848 */
43#define MII_DP83848_ID 0x20005c90
44#define MII_DPPHYSTS_REG 16
45#define MII_DPMICR_REG 17
46#define MII_DPMISR_REG 18
47
48/* Altima AC101L PHY */
49#define MII_AC101L_ID 0x00225520
50
51/* Micrel KS8721 PHY */
52#define MII_KS8721_ID 0x00221610
53
54/* Teridian 78Q2123/78Q2133 */
55#define MII_T78Q21x3_ID 0x000e7230
56#define MII_T78Q21INT_REG 17
57
58/* SMSC LAN83C185 */
59#define MII_LAN83C185_ID 0x0007C0A0
60
61/* ........................................................................ */
62
63#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
64#define MAX_RX_DESCR 9 /* max number of receive buffers */
65
66#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
67#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
68
69#define EMAC_BROADCAST 0x80000000 /* broadcast address */
70#define EMAC_MULTICAST 0x40000000 /* multicast address */
71#define EMAC_UNICAST 0x20000000 /* unicast address */
72
73struct rbf_t
74{
75 unsigned int addr;
76 unsigned long size;
77};
78
79struct recv_desc_bufs
80{
81 struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
82 char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
83};
84
85struct at91_private
86{
87 struct mii_if_info mii; /* ethtool support */
88 struct at91_eth_data board_data; /* board-specific configuration */
89 struct clk *ether_clk; /* clock */
90
91 /* PHY */
92 unsigned long phy_type; /* type of PHY (PHY_ID) */
93 spinlock_t lock; /* lock for MDI interface */
94 short phy_media; /* media interface type */
95 unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
96 struct timer_list check_timer; /* Poll link status */
97
98 /* Transmit */
99 struct sk_buff *skb; /* holds skb until xmit interrupt completes */
100 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
101 int skb_length; /* saved skb length for pci_unmap_single */
102
103 /* Receive */
104 int rxBuffIndex; /* index into receive descriptor list */
105 struct recv_desc_bufs *dlist; /* descriptor list address */
106 struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
107};
108
109#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
new file mode 100644
index 000000000000..dc4e305a1087
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -0,0 +1,1366 @@
1/*
2 * Atmel MACB Ethernet Controller driver
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/clk.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/platform_device.h>
23#include <linux/phy.h>
24
25#include <mach/board.h>
26#include <mach/cpu.h>
27
28#include "macb.h"
29
30#define RX_BUFFER_SIZE 128
31#define RX_RING_SIZE 512
32#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
33
34/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
35#define RX_OFFSET 2
36
37#define TX_RING_SIZE 128
38#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
39#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
40
41#define TX_RING_GAP(bp) \
42 (TX_RING_SIZE - (bp)->tx_pending)
43#define TX_BUFFS_AVAIL(bp) \
44 (((bp)->tx_tail <= (bp)->tx_head) ? \
45 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
46 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
47#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
48
49#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
50
51/* minimum number of free TX descriptors before waking up TX process */
52#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
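As a worked example of the ring arithmetic above (numbers are illustrative): with TX_RING_SIZE = 128 and the default tx_pending of 127, TX_RING_GAP() is 1. If tx_tail = 2 and tx_head = 5, TX_BUFFS_AVAIL() evaluates to 2 + 127 - 5 = 124 free slots; after tx_head wraps around the end of the ring (say tx_tail = 5, tx_head = 2), the other branch gives 5 - 2 - 1 = 2. The queue is only woken again once more than MACB_TX_WAKEUP_THRESH (32) slots are free.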
53
54#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
55 | MACB_BIT(ISR_ROVR))
56
57static void __macb_set_hwaddr(struct macb *bp)
58{
59 u32 bottom;
60 u16 top;
61
62 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
63 macb_writel(bp, SA1B, bottom);
64 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
65 macb_writel(bp, SA1T, top);
66}
67
68static void __init macb_get_hwaddr(struct macb *bp)
69{
70 u32 bottom;
71 u16 top;
72 u8 addr[6];
73
74 bottom = macb_readl(bp, SA1B);
75 top = macb_readl(bp, SA1T);
76
77 addr[0] = bottom & 0xff;
78 addr[1] = (bottom >> 8) & 0xff;
79 addr[2] = (bottom >> 16) & 0xff;
80 addr[3] = (bottom >> 24) & 0xff;
81 addr[4] = top & 0xff;
82 addr[5] = (top >> 8) & 0xff;
83
84 if (is_valid_ether_addr(addr)) {
85 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
86 } else {
87 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
88 random_ether_addr(bp->dev->dev_addr);
89 }
90}
91
92static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
93{
94 struct macb *bp = bus->priv;
95 int value;
96
97 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
98 | MACB_BF(RW, MACB_MAN_READ)
99 | MACB_BF(PHYA, mii_id)
100 | MACB_BF(REGA, regnum)
101 | MACB_BF(CODE, MACB_MAN_CODE)));
102
103 /* wait for end of transfer */
104 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
105 cpu_relax();
106
107 value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
108
109 return value;
110}
111
112static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
113 u16 value)
114{
115 struct macb *bp = bus->priv;
116
117 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
118 | MACB_BF(RW, MACB_MAN_WRITE)
119 | MACB_BF(PHYA, mii_id)
120 | MACB_BF(REGA, regnum)
121 | MACB_BF(CODE, MACB_MAN_CODE)
122 | MACB_BF(DATA, value)));
123
124 /* wait for end of transfer */
125 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
126 cpu_relax();
127
128 return 0;
129}
130
131static int macb_mdio_reset(struct mii_bus *bus)
132{
133 return 0;
134}
135
136static void macb_handle_link_change(struct net_device *dev)
137{
138 struct macb *bp = netdev_priv(dev);
139 struct phy_device *phydev = bp->phy_dev;
140 unsigned long flags;
141
142 int status_change = 0;
143
144 spin_lock_irqsave(&bp->lock, flags);
145
146 if (phydev->link) {
147 if ((bp->speed != phydev->speed) ||
148 (bp->duplex != phydev->duplex)) {
149 u32 reg;
150
151 reg = macb_readl(bp, NCFGR);
152 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
153
154 if (phydev->duplex)
155 reg |= MACB_BIT(FD);
156 if (phydev->speed == SPEED_100)
157 reg |= MACB_BIT(SPD);
158
159 macb_writel(bp, NCFGR, reg);
160
161 bp->speed = phydev->speed;
162 bp->duplex = phydev->duplex;
163 status_change = 1;
164 }
165 }
166
167 if (phydev->link != bp->link) {
168 if (!phydev->link) {
169 bp->speed = 0;
170 bp->duplex = -1;
171 }
172 bp->link = phydev->link;
173
174 status_change = 1;
175 }
176
177 spin_unlock_irqrestore(&bp->lock, flags);
178
179 if (status_change) {
180 if (phydev->link)
181 printk(KERN_INFO "%s: link up (%d/%s)\n",
182 dev->name, phydev->speed,
183 DUPLEX_FULL == phydev->duplex ? "Full":"Half");
184 else
185 printk(KERN_INFO "%s: link down\n", dev->name);
186 }
187}
188
189/* based on au1000_eth.c */
190static int macb_mii_probe(struct net_device *dev)
191{
192 struct macb *bp = netdev_priv(dev);
193 struct phy_device *phydev;
194 struct eth_platform_data *pdata;
195 int ret;
196
197 phydev = phy_find_first(bp->mii_bus);
198 if (!phydev) {
199 printk (KERN_ERR "%s: no PHY found\n", dev->name);
200 return -1;
201 }
202
203 pdata = bp->pdev->dev.platform_data;
204 /* TODO : add pin_irq */
205
206 /* attach the mac to the phy */
207 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
208 pdata && pdata->is_rmii ?
209 PHY_INTERFACE_MODE_RMII :
210 PHY_INTERFACE_MODE_MII);
211 if (ret) {
212 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
213 return ret;
214 }
215
216 /* mask with MAC supported features */
217 phydev->supported &= PHY_BASIC_FEATURES;
218
219 phydev->advertising = phydev->supported;
220
221 bp->link = 0;
222 bp->speed = 0;
223 bp->duplex = -1;
224 bp->phy_dev = phydev;
225
226 return 0;
227}
228
229static int macb_mii_init(struct macb *bp)
230{
231 struct eth_platform_data *pdata;
232 int err = -ENXIO, i;
233
234 /* Enable management port */
235 macb_writel(bp, NCR, MACB_BIT(MPE));
236
237 bp->mii_bus = mdiobus_alloc();
238 if (bp->mii_bus == NULL) {
239 err = -ENOMEM;
240 goto err_out;
241 }
242
243 bp->mii_bus->name = "MACB_mii_bus";
244 bp->mii_bus->read = &macb_mdio_read;
245 bp->mii_bus->write = &macb_mdio_write;
246 bp->mii_bus->reset = &macb_mdio_reset;
247 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
248 bp->mii_bus->priv = bp;
249 bp->mii_bus->parent = &bp->dev->dev;
250 pdata = bp->pdev->dev.platform_data;
251
252 if (pdata)
253 bp->mii_bus->phy_mask = pdata->phy_mask;
254
255 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
256 if (!bp->mii_bus->irq) {
257 err = -ENOMEM;
258 goto err_out_free_mdiobus;
259 }
260
261 for (i = 0; i < PHY_MAX_ADDR; i++)
262 bp->mii_bus->irq[i] = PHY_POLL;
263
264 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
265
266 if (mdiobus_register(bp->mii_bus))
267 goto err_out_free_mdio_irq;
268
269 if (macb_mii_probe(bp->dev) != 0) {
270 goto err_out_unregister_bus;
271 }
272
273 return 0;
274
275err_out_unregister_bus:
276 mdiobus_unregister(bp->mii_bus);
277err_out_free_mdio_irq:
278 kfree(bp->mii_bus->irq);
279err_out_free_mdiobus:
280 mdiobus_free(bp->mii_bus);
281err_out:
282 return err;
283}
284
285static void macb_update_stats(struct macb *bp)
286{
287 u32 __iomem *reg = bp->regs + MACB_PFR;
288 u32 *p = &bp->hw_stats.rx_pause_frames;
289 u32 *end = &bp->hw_stats.tx_pause_frames + 1;
290
291 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
292
293 for(; p < end; p++, reg++)
294 *p += __raw_readl(reg);
295}
296
297static void macb_tx(struct macb *bp)
298{
299 unsigned int tail;
300 unsigned int head;
301 u32 status;
302
303 status = macb_readl(bp, TSR);
304 macb_writel(bp, TSR, status);
305
306 dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
307 (unsigned long)status);
308
309 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
310 int i;
311 printk(KERN_ERR "%s: TX %s, resetting buffers\n",
312 bp->dev->name, status & MACB_BIT(UND) ?
313 "underrun" : "retry limit exceeded");
314
315 /* Transfer ongoing, disable transmitter, to avoid confusion */
316 if (status & MACB_BIT(TGO))
317 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
318
319 head = bp->tx_head;
320
321 /* Mark all the buffers as used to avoid sending a lost buffer */
322 for (i = 0; i < TX_RING_SIZE; i++)
323 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
324
325 /* Add wrap bit */
326 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
327
328 /* free transmit buffers in the upper layer */
329 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
330 struct ring_info *rp = &bp->tx_skb[tail];
331 struct sk_buff *skb = rp->skb;
332
333 BUG_ON(skb == NULL);
334
335 rmb();
336
337 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
338 DMA_TO_DEVICE);
339 rp->skb = NULL;
340 dev_kfree_skb_irq(skb);
341 }
342
343 bp->tx_head = bp->tx_tail = 0;
344
345 /* Enable the transmitter again */
346 if (status & MACB_BIT(TGO))
347 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
348 }
349
350 if (!(status & MACB_BIT(COMP)))
351 /*
352 * This may happen when a buffer becomes complete
353 * between reading the ISR and scanning the
354 * descriptors. Nothing to worry about.
355 */
356 return;
357
358 head = bp->tx_head;
359 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
360 struct ring_info *rp = &bp->tx_skb[tail];
361 struct sk_buff *skb = rp->skb;
362 u32 bufstat;
363
364 BUG_ON(skb == NULL);
365
366 rmb();
367 bufstat = bp->tx_ring[tail].ctrl;
368
369 if (!(bufstat & MACB_BIT(TX_USED)))
370 break;
371
372 dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
373 tail, skb->data);
374 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
375 DMA_TO_DEVICE);
376 bp->stats.tx_packets++;
377 bp->stats.tx_bytes += skb->len;
378 rp->skb = NULL;
379 dev_kfree_skb_irq(skb);
380 }
381
382 bp->tx_tail = tail;
383 if (netif_queue_stopped(bp->dev) &&
384 TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
385 netif_wake_queue(bp->dev);
386}
387
388static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
389 unsigned int last_frag)
390{
391 unsigned int len;
392 unsigned int frag;
393 unsigned int offset = 0;
394 struct sk_buff *skb;
395
396 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
397
398 dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
399 first_frag, last_frag, len);
400
401 skb = dev_alloc_skb(len + RX_OFFSET);
402 if (!skb) {
403 bp->stats.rx_dropped++;
404 for (frag = first_frag; ; frag = NEXT_RX(frag)) {
405 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
406 if (frag == last_frag)
407 break;
408 }
409 wmb();
410 return 1;
411 }
412
413 skb_reserve(skb, RX_OFFSET);
414 skb_checksum_none_assert(skb);
415 skb_put(skb, len);
416
417 for (frag = first_frag; ; frag = NEXT_RX(frag)) {
418 unsigned int frag_len = RX_BUFFER_SIZE;
419
420 if (offset + frag_len > len) {
421 BUG_ON(frag != last_frag);
422 frag_len = len - offset;
423 }
424 skb_copy_to_linear_data_offset(skb, offset,
425 (bp->rx_buffers +
426 (RX_BUFFER_SIZE * frag)),
427 frag_len);
428 offset += RX_BUFFER_SIZE;
429 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
430 wmb();
431
432 if (frag == last_frag)
433 break;
434 }
435
436 skb->protocol = eth_type_trans(skb, bp->dev);
437
438 bp->stats.rx_packets++;
439 bp->stats.rx_bytes += len;
440 dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
441 skb->len, skb->csum);
442 netif_receive_skb(skb);
443
444 return 0;
445}
446
447/* Mark DMA descriptors from begin up to and not including end as unused */
448static void discard_partial_frame(struct macb *bp, unsigned int begin,
449 unsigned int end)
450{
451 unsigned int frag;
452
453 for (frag = begin; frag != end; frag = NEXT_RX(frag))
454 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
455 wmb();
456
457 /*
458 * When this happens, the hardware stats registers for
459 * whatever caused this are updated, so we don't have to record
460 * anything.
461 */
462}
463
464static int macb_rx(struct macb *bp, int budget)
465{
466 int received = 0;
467 unsigned int tail = bp->rx_tail;
468 int first_frag = -1;
469
470 for (; budget > 0; tail = NEXT_RX(tail)) {
471 u32 addr, ctrl;
472
473 rmb();
474 addr = bp->rx_ring[tail].addr;
475 ctrl = bp->rx_ring[tail].ctrl;
476
477 if (!(addr & MACB_BIT(RX_USED)))
478 break;
479
480 if (ctrl & MACB_BIT(RX_SOF)) {
481 if (first_frag != -1)
482 discard_partial_frame(bp, first_frag, tail);
483 first_frag = tail;
484 }
485
486 if (ctrl & MACB_BIT(RX_EOF)) {
487 int dropped;
488 BUG_ON(first_frag == -1);
489
490 dropped = macb_rx_frame(bp, first_frag, tail);
491 first_frag = -1;
492 if (!dropped) {
493 received++;
494 budget--;
495 }
496 }
497 }
498
499 if (first_frag != -1)
500 bp->rx_tail = first_frag;
501 else
502 bp->rx_tail = tail;
503
504 return received;
505}
506
507static int macb_poll(struct napi_struct *napi, int budget)
508{
509 struct macb *bp = container_of(napi, struct macb, napi);
510 int work_done;
511 u32 status;
512
513 status = macb_readl(bp, RSR);
514 macb_writel(bp, RSR, status);
515
516 work_done = 0;
517
518 dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
519 (unsigned long)status, budget);
520
521 work_done = macb_rx(bp, budget);
522 if (work_done < budget) {
523 napi_complete(napi);
524
525 /*
526 * We've done what we can to clean the buffers. Make sure we
527 * get notified when new packets arrive.
528 */
529 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
530 }
531
532 /* TODO: Handle errors */
533
534 return work_done;
535}
536
537static irqreturn_t macb_interrupt(int irq, void *dev_id)
538{
539 struct net_device *dev = dev_id;
540 struct macb *bp = netdev_priv(dev);
541 u32 status;
542
543 status = macb_readl(bp, ISR);
544
545 if (unlikely(!status))
546 return IRQ_NONE;
547
548 spin_lock(&bp->lock);
549
550 while (status) {
551 /* close possible race with dev_close */
552 if (unlikely(!netif_running(dev))) {
553 macb_writel(bp, IDR, ~0UL);
554 break;
555 }
556
557 if (status & MACB_RX_INT_FLAGS) {
558 /*
559 * There's no point taking any more interrupts
560 * until we have processed the buffers. The
561 * scheduling call may fail if the poll routine
562 * is already scheduled, so disable interrupts
563 * now.
564 */
565 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
566
567 if (napi_schedule_prep(&bp->napi)) {
568 dev_dbg(&bp->pdev->dev,
569 "scheduling RX softirq\n");
570 __napi_schedule(&bp->napi);
571 }
572 }
573
574 if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
575 MACB_BIT(ISR_RLE)))
576 macb_tx(bp);
577
578 /*
579 * Link change detection isn't possible with RMII, so we'll
580 * add that if/when we get our hands on a full-blown MII PHY.
581 */
582
583 if (status & MACB_BIT(ISR_ROVR)) {
584 /* We missed at least one packet */
585 bp->hw_stats.rx_overruns++;
586 }
587
588 if (status & MACB_BIT(HRESP)) {
589 /*
590 * TODO: Reset the hardware, and maybe move the printk
591 * to a lower-priority context as well (work queue?)
592 */
593 printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
594 dev->name);
595 }
596
597 status = macb_readl(bp, ISR);
598 }
599
600 spin_unlock(&bp->lock);
601
602 return IRQ_HANDLED;
603}
604
605#ifdef CONFIG_NET_POLL_CONTROLLER
606/*
607 * Polling receive - used by netconsole and other diagnostic tools
608 * to allow network i/o with interrupts disabled.
609 */
610static void macb_poll_controller(struct net_device *dev)
611{
612 unsigned long flags;
613
614 local_irq_save(flags);
615 macb_interrupt(dev->irq, dev);
616 local_irq_restore(flags);
617}
618#endif
619
620static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
621{
622 struct macb *bp = netdev_priv(dev);
623 dma_addr_t mapping;
624 unsigned int len, entry;
625 u32 ctrl;
626 unsigned long flags;
627
628#ifdef DEBUG
629 int i;
630 dev_dbg(&bp->pdev->dev,
631 "start_xmit: len %u head %p data %p tail %p end %p\n",
632 skb->len, skb->head, skb->data,
633 skb_tail_pointer(skb), skb_end_pointer(skb));
634 dev_dbg(&bp->pdev->dev,
635 "data:");
636 for (i = 0; i < 16; i++)
637 printk(" %02x", (unsigned int)skb->data[i]);
638 printk("\n");
639#endif
640
641 len = skb->len;
642 spin_lock_irqsave(&bp->lock, flags);
643
644 /* This is a hard error, log it. */
645 if (TX_BUFFS_AVAIL(bp) < 1) {
646 netif_stop_queue(dev);
647 spin_unlock_irqrestore(&bp->lock, flags);
648 dev_err(&bp->pdev->dev,
649 "BUG! Tx Ring full when queue awake!\n");
650 dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
651 bp->tx_head, bp->tx_tail);
652 return NETDEV_TX_BUSY;
653 }
654
655 entry = bp->tx_head;
656 dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
657 mapping = dma_map_single(&bp->pdev->dev, skb->data,
658 len, DMA_TO_DEVICE);
659 bp->tx_skb[entry].skb = skb;
660 bp->tx_skb[entry].mapping = mapping;
661 dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
662 skb->data, (unsigned long)mapping);
663
664 ctrl = MACB_BF(TX_FRMLEN, len);
665 ctrl |= MACB_BIT(TX_LAST);
666 if (entry == (TX_RING_SIZE - 1))
667 ctrl |= MACB_BIT(TX_WRAP);
668
669 bp->tx_ring[entry].addr = mapping;
670 bp->tx_ring[entry].ctrl = ctrl;
671 wmb();
672
673 entry = NEXT_TX(entry);
674 bp->tx_head = entry;
675
676 skb_tx_timestamp(skb);
677
678 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
679
680 if (TX_BUFFS_AVAIL(bp) < 1)
681 netif_stop_queue(dev);
682
683 spin_unlock_irqrestore(&bp->lock, flags);
684
685 return NETDEV_TX_OK;
686}
687
688static void macb_free_consistent(struct macb *bp)
689{
690 if (bp->tx_skb) {
691 kfree(bp->tx_skb);
692 bp->tx_skb = NULL;
693 }
694 if (bp->rx_ring) {
695 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
696 bp->rx_ring, bp->rx_ring_dma);
697 bp->rx_ring = NULL;
698 }
699 if (bp->tx_ring) {
700 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
701 bp->tx_ring, bp->tx_ring_dma);
702 bp->tx_ring = NULL;
703 }
704 if (bp->rx_buffers) {
705 dma_free_coherent(&bp->pdev->dev,
706 RX_RING_SIZE * RX_BUFFER_SIZE,
707 bp->rx_buffers, bp->rx_buffers_dma);
708 bp->rx_buffers = NULL;
709 }
710}
711
712static int macb_alloc_consistent(struct macb *bp)
713{
714 int size;
715
716 size = TX_RING_SIZE * sizeof(struct ring_info);
717 bp->tx_skb = kmalloc(size, GFP_KERNEL);
718 if (!bp->tx_skb)
719 goto out_err;
720
721 size = RX_RING_BYTES;
722 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
723 &bp->rx_ring_dma, GFP_KERNEL);
724 if (!bp->rx_ring)
725 goto out_err;
726 dev_dbg(&bp->pdev->dev,
727 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
728 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
729
730 size = TX_RING_BYTES;
731 bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
732 &bp->tx_ring_dma, GFP_KERNEL);
733 if (!bp->tx_ring)
734 goto out_err;
735 dev_dbg(&bp->pdev->dev,
736 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
737 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
738
739 size = RX_RING_SIZE * RX_BUFFER_SIZE;
740 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
741 &bp->rx_buffers_dma, GFP_KERNEL);
742 if (!bp->rx_buffers)
743 goto out_err;
744 dev_dbg(&bp->pdev->dev,
745 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
746 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
747
748 return 0;
749
750out_err:
751 macb_free_consistent(bp);
752 return -ENOMEM;
753}
754
755static void macb_init_rings(struct macb *bp)
756{
757 int i;
758 dma_addr_t addr;
759
760 addr = bp->rx_buffers_dma;
761 for (i = 0; i < RX_RING_SIZE; i++) {
762 bp->rx_ring[i].addr = addr;
763 bp->rx_ring[i].ctrl = 0;
764 addr += RX_BUFFER_SIZE;
765 }
766 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
767
768 for (i = 0; i < TX_RING_SIZE; i++) {
769 bp->tx_ring[i].addr = 0;
770 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
771 }
772 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
773
774 bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
775}
776
777static void macb_reset_hw(struct macb *bp)
778{
779 /* Make sure we have the write buffer for ourselves */
780 wmb();
781
782 /*
783 * Disable RX and TX (XXX: Should we halt the transmission
784 * more gracefully?)
785 */
786 macb_writel(bp, NCR, 0);
787
788 /* Clear the stats registers (XXX: Update stats first?) */
789 macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
790
791 /* Clear all status flags */
792 macb_writel(bp, TSR, ~0UL);
793 macb_writel(bp, RSR, ~0UL);
794
795 /* Disable all interrupts */
796 macb_writel(bp, IDR, ~0UL);
797 macb_readl(bp, ISR);
798}
799
800static void macb_init_hw(struct macb *bp)
801{
802 u32 config;
803
804 macb_reset_hw(bp);
805 __macb_set_hwaddr(bp);
806
807 config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
808 config |= MACB_BIT(PAE); /* PAuse Enable */
809 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
810 config |= MACB_BIT(BIG); /* Receive oversized frames */
811 if (bp->dev->flags & IFF_PROMISC)
812 config |= MACB_BIT(CAF); /* Copy All Frames */
813 if (!(bp->dev->flags & IFF_BROADCAST))
814 config |= MACB_BIT(NBC); /* No BroadCast */
815 macb_writel(bp, NCFGR, config);
816
817 /* Initialize TX and RX buffers */
818 macb_writel(bp, RBQP, bp->rx_ring_dma);
819 macb_writel(bp, TBQP, bp->tx_ring_dma);
820
821 /* Enable TX and RX */
822 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
823
824 /* Enable interrupts */
825 macb_writel(bp, IER, (MACB_BIT(RCOMP)
826 | MACB_BIT(RXUBR)
827 | MACB_BIT(ISR_TUND)
828 | MACB_BIT(ISR_RLE)
829 | MACB_BIT(TXERR)
830 | MACB_BIT(TCOMP)
831 | MACB_BIT(ISR_ROVR)
832 | MACB_BIT(HRESP)));
833
834}
835
836/*
837 * The hash address register is 64 bits long and takes up two
838 * locations in the memory map. The least significant bits are stored
839 * in HRB and the most significant bits in HRT.
840 *
841 * The unicast hash enable and the multicast hash enable bits in the
842 * network configuration register enable the reception of hash matched
843 * frames. The destination address is reduced to a 6 bit index into
844 * the 64 bit hash register using the following hash function. The
845 * hash function is an exclusive or of every sixth bit of the
846 * destination address.
847 *
848 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
849 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
850 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
851 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
852 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
853 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
854 *
855 * da[0] represents the least significant bit of the first byte
856 * received, that is, the multicast/unicast indicator, and da[47]
857 * represents the most significant bit of the last byte received. If
858 * the hash index, hi[n], points to a bit that is set in the hash
859 * register then the frame will be matched according to whether the
860 * frame is multicast or unicast. A multicast match will be signalled
861 * if the multicast hash enable bit is set, da[0] is 1 and the hash
862 * index points to a bit set in the hash register. A unicast match
863 * will be signalled if the unicast hash enable bit is set, da[0] is 0
864 * and the hash index points to a bit set in the hash register. To
865 * receive all multicast frames, the hash register should be set with
866 * all ones and the multicast hash enable bit should be set in the
867 * network configuration register.
868 */
869
870static inline int hash_bit_value(int bitnr, __u8 *addr)
871{
872 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
873 return 1;
874 return 0;
875}
876
877/*
878 * Return the hash index value for the specified address.
879 */
880static int hash_get_index(__u8 *addr)
881{
882 int i, j, bitval;
883 int hash_index = 0;
884
885 for (j = 0; j < 6; j++) {
886 for (i = 0, bitval = 0; i < 8; i++)
887 bitval ^= hash_bit_value(i*6 + j, addr);
888
889 hash_index |= (bitval << j);
890 }
891
892 return hash_index;
893}
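/*
 * Worked example (illustrative): for the multicast address
 * 33:33:00:00:00:01, da[0..7] come from the first 0x33 (bits 0, 1, 4
 * and 5 set), da[8..15] from the second 0x33 and da[40] from the
 * trailing 0x01.  Folding every sixth bit as described above gives
 * hi[5..0] = 101100b, so hash_get_index() returns 44 and
 * macb_sethashtable() below sets bit 12 of HRT (44 - 32 = 12).
 */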
894
895/*
896 * Add multicast addresses to the internal multicast-hash table.
897 */
898static void macb_sethashtable(struct net_device *dev)
899{
900 struct netdev_hw_addr *ha;
901 unsigned long mc_filter[2];
902 unsigned int bitnr;
903 struct macb *bp = netdev_priv(dev);
904
905 mc_filter[0] = mc_filter[1] = 0;
906
907 netdev_for_each_mc_addr(ha, dev) {
908 bitnr = hash_get_index(ha->addr);
909 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
910 }
911
912 macb_writel(bp, HRB, mc_filter[0]);
913 macb_writel(bp, HRT, mc_filter[1]);
914}
915
916/*
917 * Enable/Disable promiscuous and multicast modes.
918 */
919static void macb_set_rx_mode(struct net_device *dev)
920{
921 unsigned long cfg;
922 struct macb *bp = netdev_priv(dev);
923
924 cfg = macb_readl(bp, NCFGR);
925
926 if (dev->flags & IFF_PROMISC)
927 /* Enable promiscuous mode */
928 cfg |= MACB_BIT(CAF);
929 else if (dev->flags & (~IFF_PROMISC))
930 /* Disable promiscuous mode */
931 cfg &= ~MACB_BIT(CAF);
932
933 if (dev->flags & IFF_ALLMULTI) {
934 /* Enable all multicast mode */
935 macb_writel(bp, HRB, -1);
936 macb_writel(bp, HRT, -1);
937 cfg |= MACB_BIT(NCFGR_MTI);
938 } else if (!netdev_mc_empty(dev)) {
939 /* Enable specific multicasts */
940 macb_sethashtable(dev);
941 cfg |= MACB_BIT(NCFGR_MTI);
942 } else if (dev->flags & (~IFF_ALLMULTI)) {
943 /* Disable all multicast mode */
944 macb_writel(bp, HRB, 0);
945 macb_writel(bp, HRT, 0);
946 cfg &= ~MACB_BIT(NCFGR_MTI);
947 }
948
949 macb_writel(bp, NCFGR, cfg);
950}
951
952static int macb_open(struct net_device *dev)
953{
954 struct macb *bp = netdev_priv(dev);
955 int err;
956
957 dev_dbg(&bp->pdev->dev, "open\n");
958
959	/* If the PHY is not yet registered, retry later */
960 if (!bp->phy_dev)
961 return -EAGAIN;
962
963 if (!is_valid_ether_addr(dev->dev_addr))
964 return -EADDRNOTAVAIL;
965
966 err = macb_alloc_consistent(bp);
967 if (err) {
968 printk(KERN_ERR
969 "%s: Unable to allocate DMA memory (error %d)\n",
970 dev->name, err);
971 return err;
972 }
973
974 napi_enable(&bp->napi);
975
976 macb_init_rings(bp);
977 macb_init_hw(bp);
978
979 /* schedule a link state check */
980 phy_start(bp->phy_dev);
981
982 netif_start_queue(dev);
983
984 return 0;
985}
986
987static int macb_close(struct net_device *dev)
988{
989 struct macb *bp = netdev_priv(dev);
990 unsigned long flags;
991
992 netif_stop_queue(dev);
993 napi_disable(&bp->napi);
994
995 if (bp->phy_dev)
996 phy_stop(bp->phy_dev);
997
998 spin_lock_irqsave(&bp->lock, flags);
999 macb_reset_hw(bp);
1000 netif_carrier_off(dev);
1001 spin_unlock_irqrestore(&bp->lock, flags);
1002
1003 macb_free_consistent(bp);
1004
1005 return 0;
1006}
1007
1008static struct net_device_stats *macb_get_stats(struct net_device *dev)
1009{
1010 struct macb *bp = netdev_priv(dev);
1011 struct net_device_stats *nstat = &bp->stats;
1012 struct macb_stats *hwstat = &bp->hw_stats;
1013
1014 /* read stats from hardware */
1015 macb_update_stats(bp);
1016
1017 /* Convert HW stats into netdevice stats */
1018 nstat->rx_errors = (hwstat->rx_fcs_errors +
1019 hwstat->rx_align_errors +
1020 hwstat->rx_resource_errors +
1021 hwstat->rx_overruns +
1022 hwstat->rx_oversize_pkts +
1023 hwstat->rx_jabbers +
1024 hwstat->rx_undersize_pkts +
1025 hwstat->sqe_test_errors +
1026 hwstat->rx_length_mismatch);
1027 nstat->tx_errors = (hwstat->tx_late_cols +
1028 hwstat->tx_excessive_cols +
1029 hwstat->tx_underruns +
1030 hwstat->tx_carrier_errors);
1031 nstat->collisions = (hwstat->tx_single_cols +
1032 hwstat->tx_multiple_cols +
1033 hwstat->tx_excessive_cols);
1034 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1035 hwstat->rx_jabbers +
1036 hwstat->rx_undersize_pkts +
1037 hwstat->rx_length_mismatch);
1038 nstat->rx_over_errors = hwstat->rx_resource_errors +
1039 hwstat->rx_overruns;
1040 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
1041 nstat->rx_frame_errors = hwstat->rx_align_errors;
1042 nstat->rx_fifo_errors = hwstat->rx_overruns;
1043 /* XXX: What does "missed" mean? */
1044 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
1045 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
1046 nstat->tx_fifo_errors = hwstat->tx_underruns;
1047 /* Don't know about heartbeat or window errors... */
1048
1049 return nstat;
1050}
1051
1052static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1053{
1054 struct macb *bp = netdev_priv(dev);
1055 struct phy_device *phydev = bp->phy_dev;
1056
1057 if (!phydev)
1058 return -ENODEV;
1059
1060 return phy_ethtool_gset(phydev, cmd);
1061}
1062
1063static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1064{
1065 struct macb *bp = netdev_priv(dev);
1066 struct phy_device *phydev = bp->phy_dev;
1067
1068 if (!phydev)
1069 return -ENODEV;
1070
1071 return phy_ethtool_sset(phydev, cmd);
1072}
1073
1074static void macb_get_drvinfo(struct net_device *dev,
1075 struct ethtool_drvinfo *info)
1076{
1077 struct macb *bp = netdev_priv(dev);
1078
1079 strcpy(info->driver, bp->pdev->dev.driver->name);
1080 strcpy(info->version, "$Revision: 1.14 $");
1081 strcpy(info->bus_info, dev_name(&bp->pdev->dev));
1082}
1083
1084static const struct ethtool_ops macb_ethtool_ops = {
1085 .get_settings = macb_get_settings,
1086 .set_settings = macb_set_settings,
1087 .get_drvinfo = macb_get_drvinfo,
1088 .get_link = ethtool_op_get_link,
1089};
1090
1091static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1092{
1093 struct macb *bp = netdev_priv(dev);
1094 struct phy_device *phydev = bp->phy_dev;
1095
1096 if (!netif_running(dev))
1097 return -EINVAL;
1098
1099 if (!phydev)
1100 return -ENODEV;
1101
1102 return phy_mii_ioctl(phydev, rq, cmd);
1103}
1104
1105static const struct net_device_ops macb_netdev_ops = {
1106 .ndo_open = macb_open,
1107 .ndo_stop = macb_close,
1108 .ndo_start_xmit = macb_start_xmit,
1109 .ndo_set_multicast_list = macb_set_rx_mode,
1110 .ndo_get_stats = macb_get_stats,
1111 .ndo_do_ioctl = macb_ioctl,
1112 .ndo_validate_addr = eth_validate_addr,
1113 .ndo_change_mtu = eth_change_mtu,
1114 .ndo_set_mac_address = eth_mac_addr,
1115#ifdef CONFIG_NET_POLL_CONTROLLER
1116 .ndo_poll_controller = macb_poll_controller,
1117#endif
1118};
1119
1120static int __init macb_probe(struct platform_device *pdev)
1121{
1122 struct eth_platform_data *pdata;
1123 struct resource *regs;
1124 struct net_device *dev;
1125 struct macb *bp;
1126 struct phy_device *phydev;
1127 unsigned long pclk_hz;
1128 u32 config;
1129 int err = -ENXIO;
1130
1131 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1132 if (!regs) {
1133 dev_err(&pdev->dev, "no mmio resource defined\n");
1134 goto err_out;
1135 }
1136
1137 err = -ENOMEM;
1138 dev = alloc_etherdev(sizeof(*bp));
1139 if (!dev) {
1140 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
1141 goto err_out;
1142 }
1143
1144 SET_NETDEV_DEV(dev, &pdev->dev);
1145
1146 /* TODO: Actually, we have some interesting features... */
1147 dev->features |= 0;
1148
1149 bp = netdev_priv(dev);
1150 bp->pdev = pdev;
1151 bp->dev = dev;
1152
1153 spin_lock_init(&bp->lock);
1154
1155#if defined(CONFIG_ARCH_AT91)
1156 bp->pclk = clk_get(&pdev->dev, "macb_clk");
1157 if (IS_ERR(bp->pclk)) {
1158 dev_err(&pdev->dev, "failed to get macb_clk\n");
1159 goto err_out_free_dev;
1160 }
1161 clk_enable(bp->pclk);
1162#else
1163 bp->pclk = clk_get(&pdev->dev, "pclk");
1164 if (IS_ERR(bp->pclk)) {
1165 dev_err(&pdev->dev, "failed to get pclk\n");
1166 goto err_out_free_dev;
1167 }
1168 bp->hclk = clk_get(&pdev->dev, "hclk");
1169 if (IS_ERR(bp->hclk)) {
1170 dev_err(&pdev->dev, "failed to get hclk\n");
1171 goto err_out_put_pclk;
1172 }
1173
1174 clk_enable(bp->pclk);
1175 clk_enable(bp->hclk);
1176#endif
1177
1178 bp->regs = ioremap(regs->start, resource_size(regs));
1179 if (!bp->regs) {
1180 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
1181 err = -ENOMEM;
1182 goto err_out_disable_clocks;
1183 }
1184
1185 dev->irq = platform_get_irq(pdev, 0);
1186 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
1187 if (err) {
1188 printk(KERN_ERR
1189 "%s: Unable to request IRQ %d (error %d)\n",
1190 dev->name, dev->irq, err);
1191 goto err_out_iounmap;
1192 }
1193
1194 dev->netdev_ops = &macb_netdev_ops;
1195 netif_napi_add(dev, &bp->napi, macb_poll, 64);
1196 dev->ethtool_ops = &macb_ethtool_ops;
1197
1198 dev->base_addr = regs->start;
1199
1200 /* Set MII management clock divider */
1201 pclk_hz = clk_get_rate(bp->pclk);
1202 if (pclk_hz <= 20000000)
1203 config = MACB_BF(CLK, MACB_CLK_DIV8);
1204 else if (pclk_hz <= 40000000)
1205 config = MACB_BF(CLK, MACB_CLK_DIV16);
1206 else if (pclk_hz <= 80000000)
1207 config = MACB_BF(CLK, MACB_CLK_DIV32);
1208 else
1209 config = MACB_BF(CLK, MACB_CLK_DIV64);
1210 macb_writel(bp, NCFGR, config);
1211
1212 macb_get_hwaddr(bp);
1213 pdata = pdev->dev.platform_data;
1214
1215 if (pdata && pdata->is_rmii)
1216#if defined(CONFIG_ARCH_AT91)
1217		macb_writel(bp, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
1218#else
1219 macb_writel(bp, USRIO, 0);
1220#endif
1221 else
1222#if defined(CONFIG_ARCH_AT91)
1223 macb_writel(bp, USRIO, MACB_BIT(CLKEN));
1224#else
1225 macb_writel(bp, USRIO, MACB_BIT(MII));
1226#endif
1227
1228 bp->tx_pending = DEF_TX_RING_PENDING;
1229
1230 err = register_netdev(dev);
1231 if (err) {
1232 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1233 goto err_out_free_irq;
1234 }
1235
1236 if (macb_mii_init(bp) != 0) {
1237 goto err_out_unregister_netdev;
1238 }
1239
1240 platform_set_drvdata(pdev, dev);
1241
1242 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
1243 dev->name, dev->base_addr, dev->irq, dev->dev_addr);
1244
1245 phydev = bp->phy_dev;
1246 printk(KERN_INFO "%s: attached PHY driver [%s] "
1247 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
1248 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1249
1250 return 0;
1251
1252err_out_unregister_netdev:
1253 unregister_netdev(dev);
1254err_out_free_irq:
1255 free_irq(dev->irq, dev);
1256err_out_iounmap:
1257 iounmap(bp->regs);
1258err_out_disable_clocks:
1259#ifndef CONFIG_ARCH_AT91
1260 clk_disable(bp->hclk);
1261 clk_put(bp->hclk);
1262#endif
1263 clk_disable(bp->pclk);
1264#ifndef CONFIG_ARCH_AT91
1265err_out_put_pclk:
1266#endif
1267 clk_put(bp->pclk);
1268err_out_free_dev:
1269 free_netdev(dev);
1270err_out:
1271 platform_set_drvdata(pdev, NULL);
1272 return err;
1273}
1274
1275static int __exit macb_remove(struct platform_device *pdev)
1276{
1277 struct net_device *dev;
1278 struct macb *bp;
1279
1280 dev = platform_get_drvdata(pdev);
1281
1282 if (dev) {
1283 bp = netdev_priv(dev);
1284 if (bp->phy_dev)
1285 phy_disconnect(bp->phy_dev);
1286 mdiobus_unregister(bp->mii_bus);
1287 kfree(bp->mii_bus->irq);
1288 mdiobus_free(bp->mii_bus);
1289 unregister_netdev(dev);
1290 free_irq(dev->irq, dev);
1291 iounmap(bp->regs);
1292#ifndef CONFIG_ARCH_AT91
1293 clk_disable(bp->hclk);
1294 clk_put(bp->hclk);
1295#endif
1296 clk_disable(bp->pclk);
1297 clk_put(bp->pclk);
1298 free_netdev(dev);
1299 platform_set_drvdata(pdev, NULL);
1300 }
1301
1302 return 0;
1303}
1304
1305#ifdef CONFIG_PM
1306static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1307{
1308 struct net_device *netdev = platform_get_drvdata(pdev);
1309 struct macb *bp = netdev_priv(netdev);
1310
1311 netif_device_detach(netdev);
1312
1313#ifndef CONFIG_ARCH_AT91
1314 clk_disable(bp->hclk);
1315#endif
1316 clk_disable(bp->pclk);
1317
1318 return 0;
1319}
1320
1321static int macb_resume(struct platform_device *pdev)
1322{
1323 struct net_device *netdev = platform_get_drvdata(pdev);
1324 struct macb *bp = netdev_priv(netdev);
1325
1326 clk_enable(bp->pclk);
1327#ifndef CONFIG_ARCH_AT91
1328 clk_enable(bp->hclk);
1329#endif
1330
1331 netif_device_attach(netdev);
1332
1333 return 0;
1334}
1335#else
1336#define macb_suspend NULL
1337#define macb_resume NULL
1338#endif
1339
1340static struct platform_driver macb_driver = {
1341 .remove = __exit_p(macb_remove),
1342 .suspend = macb_suspend,
1343 .resume = macb_resume,
1344 .driver = {
1345 .name = "macb",
1346 .owner = THIS_MODULE,
1347 },
1348};
1349
1350static int __init macb_init(void)
1351{
1352 return platform_driver_probe(&macb_driver, macb_probe);
1353}
1354
1355static void __exit macb_exit(void)
1356{
1357 platform_driver_unregister(&macb_driver);
1358}
1359
1360module_init(macb_init);
1361module_exit(macb_exit);
1362
1363MODULE_LICENSE("GPL");
1364MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
1365MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1366MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
new file mode 100644
index 000000000000..d3212f6db703
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -0,0 +1,394 @@
1/*
2 * Atmel MACB Ethernet Controller driver
3 *
4 * Copyright (C) 2004-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef _MACB_H
11#define _MACB_H
12
13/* MACB register offsets */
14#define MACB_NCR 0x0000
15#define MACB_NCFGR 0x0004
16#define MACB_NSR 0x0008
17#define MACB_TSR 0x0014
18#define MACB_RBQP 0x0018
19#define MACB_TBQP 0x001c
20#define MACB_RSR 0x0020
21#define MACB_ISR 0x0024
22#define MACB_IER 0x0028
23#define MACB_IDR 0x002c
24#define MACB_IMR 0x0030
25#define MACB_MAN 0x0034
26#define MACB_PTR 0x0038
27#define MACB_PFR 0x003c
28#define MACB_FTO 0x0040
29#define MACB_SCF 0x0044
30#define MACB_MCF 0x0048
31#define MACB_FRO 0x004c
32#define MACB_FCSE 0x0050
33#define MACB_ALE 0x0054
34#define MACB_DTF 0x0058
35#define MACB_LCOL 0x005c
36#define MACB_EXCOL 0x0060
37#define MACB_TUND 0x0064
38#define MACB_CSE 0x0068
39#define MACB_RRE 0x006c
40#define MACB_ROVR 0x0070
41#define MACB_RSE 0x0074
42#define MACB_ELE 0x0078
43#define MACB_RJA 0x007c
44#define MACB_USF 0x0080
45#define MACB_STE 0x0084
46#define MACB_RLE 0x0088
47#define MACB_TPF 0x008c
48#define MACB_HRB 0x0090
49#define MACB_HRT 0x0094
50#define MACB_SA1B 0x0098
51#define MACB_SA1T 0x009c
52#define MACB_SA2B 0x00a0
53#define MACB_SA2T 0x00a4
54#define MACB_SA3B 0x00a8
55#define MACB_SA3T 0x00ac
56#define MACB_SA4B 0x00b0
57#define MACB_SA4T 0x00b4
58#define MACB_TID 0x00b8
59#define MACB_TPQ 0x00bc
60#define MACB_USRIO 0x00c0
61#define MACB_WOL 0x00c4
62
63/* Bitfields in NCR */
64#define MACB_LB_OFFSET 0
65#define MACB_LB_SIZE 1
66#define MACB_LLB_OFFSET 1
67#define MACB_LLB_SIZE 1
68#define MACB_RE_OFFSET 2
69#define MACB_RE_SIZE 1
70#define MACB_TE_OFFSET 3
71#define MACB_TE_SIZE 1
72#define MACB_MPE_OFFSET 4
73#define MACB_MPE_SIZE 1
74#define MACB_CLRSTAT_OFFSET 5
75#define MACB_CLRSTAT_SIZE 1
76#define MACB_INCSTAT_OFFSET 6
77#define MACB_INCSTAT_SIZE 1
78#define MACB_WESTAT_OFFSET 7
79#define MACB_WESTAT_SIZE 1
80#define MACB_BP_OFFSET 8
81#define MACB_BP_SIZE 1
82#define MACB_TSTART_OFFSET 9
83#define MACB_TSTART_SIZE 1
84#define MACB_THALT_OFFSET 10
85#define MACB_THALT_SIZE 1
86#define MACB_NCR_TPF_OFFSET 11
87#define MACB_NCR_TPF_SIZE 1
88#define MACB_TZQ_OFFSET 12
89#define MACB_TZQ_SIZE 1
90
91/* Bitfields in NCFGR */
92#define MACB_SPD_OFFSET 0
93#define MACB_SPD_SIZE 1
94#define MACB_FD_OFFSET 1
95#define MACB_FD_SIZE 1
96#define MACB_BIT_RATE_OFFSET 2
97#define MACB_BIT_RATE_SIZE 1
98#define MACB_JFRAME_OFFSET 3
99#define MACB_JFRAME_SIZE 1
100#define MACB_CAF_OFFSET 4
101#define MACB_CAF_SIZE 1
102#define MACB_NBC_OFFSET 5
103#define MACB_NBC_SIZE 1
104#define MACB_NCFGR_MTI_OFFSET 6
105#define MACB_NCFGR_MTI_SIZE 1
106#define MACB_UNI_OFFSET 7
107#define MACB_UNI_SIZE 1
108#define MACB_BIG_OFFSET 8
109#define MACB_BIG_SIZE 1
110#define MACB_EAE_OFFSET 9
111#define MACB_EAE_SIZE 1
112#define MACB_CLK_OFFSET 10
113#define MACB_CLK_SIZE 2
114#define MACB_RTY_OFFSET 12
115#define MACB_RTY_SIZE 1
116#define MACB_PAE_OFFSET 13
117#define MACB_PAE_SIZE 1
118#define MACB_RBOF_OFFSET 14
119#define MACB_RBOF_SIZE 2
120#define MACB_RLCE_OFFSET 16
121#define MACB_RLCE_SIZE 1
122#define MACB_DRFCS_OFFSET 17
123#define MACB_DRFCS_SIZE 1
124#define MACB_EFRHD_OFFSET 18
125#define MACB_EFRHD_SIZE 1
126#define MACB_IRXFCS_OFFSET 19
127#define MACB_IRXFCS_SIZE 1
128
129/* Bitfields in NSR */
130#define MACB_NSR_LINK_OFFSET 0
131#define MACB_NSR_LINK_SIZE 1
132#define MACB_MDIO_OFFSET 1
133#define MACB_MDIO_SIZE 1
134#define MACB_IDLE_OFFSET 2
135#define MACB_IDLE_SIZE 1
136
137/* Bitfields in TSR */
138#define MACB_UBR_OFFSET 0
139#define MACB_UBR_SIZE 1
140#define MACB_COL_OFFSET 1
141#define MACB_COL_SIZE 1
142#define MACB_TSR_RLE_OFFSET 2
143#define MACB_TSR_RLE_SIZE 1
144#define MACB_TGO_OFFSET 3
145#define MACB_TGO_SIZE 1
146#define MACB_BEX_OFFSET 4
147#define MACB_BEX_SIZE 1
148#define MACB_COMP_OFFSET 5
149#define MACB_COMP_SIZE 1
150#define MACB_UND_OFFSET 6
151#define MACB_UND_SIZE 1
152
153/* Bitfields in RSR */
154#define MACB_BNA_OFFSET 0
155#define MACB_BNA_SIZE 1
156#define MACB_REC_OFFSET 1
157#define MACB_REC_SIZE 1
158#define MACB_OVR_OFFSET 2
159#define MACB_OVR_SIZE 1
160
161/* Bitfields in ISR/IER/IDR/IMR */
162#define MACB_MFD_OFFSET 0
163#define MACB_MFD_SIZE 1
164#define MACB_RCOMP_OFFSET 1
165#define MACB_RCOMP_SIZE 1
166#define MACB_RXUBR_OFFSET 2
167#define MACB_RXUBR_SIZE 1
168#define MACB_TXUBR_OFFSET 3
169#define MACB_TXUBR_SIZE 1
170#define MACB_ISR_TUND_OFFSET 4
171#define MACB_ISR_TUND_SIZE 1
172#define MACB_ISR_RLE_OFFSET 5
173#define MACB_ISR_RLE_SIZE 1
174#define MACB_TXERR_OFFSET 6
175#define MACB_TXERR_SIZE 1
176#define MACB_TCOMP_OFFSET 7
177#define MACB_TCOMP_SIZE 1
178#define MACB_ISR_LINK_OFFSET 9
179#define MACB_ISR_LINK_SIZE 1
180#define MACB_ISR_ROVR_OFFSET 10
181#define MACB_ISR_ROVR_SIZE 1
182#define MACB_HRESP_OFFSET 11
183#define MACB_HRESP_SIZE 1
184#define MACB_PFR_OFFSET 12
185#define MACB_PFR_SIZE 1
186#define MACB_PTZ_OFFSET 13
187#define MACB_PTZ_SIZE 1
188
189/* Bitfields in MAN */
190#define MACB_DATA_OFFSET 0
191#define MACB_DATA_SIZE 16
192#define MACB_CODE_OFFSET 16
193#define MACB_CODE_SIZE 2
194#define MACB_REGA_OFFSET 18
195#define MACB_REGA_SIZE 5
196#define MACB_PHYA_OFFSET 23
197#define MACB_PHYA_SIZE 5
198#define MACB_RW_OFFSET 28
199#define MACB_RW_SIZE 2
200#define MACB_SOF_OFFSET 30
201#define MACB_SOF_SIZE 2
202
203/* Bitfields in USRIO (AVR32) */
204#define MACB_MII_OFFSET 0
205#define MACB_MII_SIZE 1
206#define MACB_EAM_OFFSET 1
207#define MACB_EAM_SIZE 1
208#define MACB_TX_PAUSE_OFFSET 2
209#define MACB_TX_PAUSE_SIZE 1
210#define MACB_TX_PAUSE_ZERO_OFFSET 3
211#define MACB_TX_PAUSE_ZERO_SIZE 1
212
213/* Bitfields in USRIO (AT91) */
214#define MACB_RMII_OFFSET 0
215#define MACB_RMII_SIZE 1
216#define MACB_CLKEN_OFFSET 1
217#define MACB_CLKEN_SIZE 1
218
219/* Bitfields in WOL */
220#define MACB_IP_OFFSET 0
221#define MACB_IP_SIZE 16
222#define MACB_MAG_OFFSET 16
223#define MACB_MAG_SIZE 1
224#define MACB_ARP_OFFSET 17
225#define MACB_ARP_SIZE 1
226#define MACB_SA1_OFFSET 18
227#define MACB_SA1_SIZE 1
228#define MACB_WOL_MTI_OFFSET 19
229#define MACB_WOL_MTI_SIZE 1
230
231/* Constants for CLK */
232#define MACB_CLK_DIV8 0
233#define MACB_CLK_DIV16 1
234#define MACB_CLK_DIV32 2
235#define MACB_CLK_DIV64 3
236
237/* Constants for MAN register */
238#define MACB_MAN_SOF 1
239#define MACB_MAN_WRITE 1
240#define MACB_MAN_READ 2
241#define MACB_MAN_CODE 2
242
243/* Bit manipulation macros */
244#define MACB_BIT(name) \
245 (1 << MACB_##name##_OFFSET)
246#define MACB_BF(name,value) \
247 (((value) & ((1 << MACB_##name##_SIZE) - 1)) \
248 << MACB_##name##_OFFSET)
249#define MACB_BFEXT(name,value)\
250 (((value) >> MACB_##name##_OFFSET) \
251 & ((1 << MACB_##name##_SIZE) - 1))
252#define MACB_BFINS(name,value,old) \
253 (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \
254 << MACB_##name##_OFFSET)) \
255 | MACB_BF(name,value))
256
257/* Register access macros */
258#define macb_readl(port,reg) \
259 __raw_readl((port)->regs + MACB_##reg)
260#define macb_writel(port,reg,value) \
261 __raw_writel((value), (port)->regs + MACB_##reg)
262
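/*
 * Usage sketch (illustrative): the field macros compose and decompose
 * register values by name.  An MDIO read frame for PHY address "phy"
 * and register "reg" can be built and issued with
 *
 *	macb_writel(bp, MAN, MACB_BF(SOF, MACB_MAN_SOF)
 *			     | MACB_BF(RW, MACB_MAN_READ)
 *			     | MACB_BF(PHYA, phy)
 *			     | MACB_BF(REGA, reg)
 *			     | MACB_BF(CODE, MACB_MAN_CODE));
 *
 * and the 16-bit result extracted with MACB_BFEXT(DATA,
 * macb_readl(bp, MAN)) once the IDLE bit in NSR is set again.
 */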
263struct dma_desc {
264 u32 addr;
265 u32 ctrl;
266};
267
268/* DMA descriptor bitfields */
269#define MACB_RX_USED_OFFSET 0
270#define MACB_RX_USED_SIZE 1
271#define MACB_RX_WRAP_OFFSET 1
272#define MACB_RX_WRAP_SIZE 1
273#define MACB_RX_WADDR_OFFSET 2
274#define MACB_RX_WADDR_SIZE 30
275
276#define MACB_RX_FRMLEN_OFFSET 0
277#define MACB_RX_FRMLEN_SIZE 12
278#define MACB_RX_OFFSET_OFFSET 12
279#define MACB_RX_OFFSET_SIZE 2
280#define MACB_RX_SOF_OFFSET 14
281#define MACB_RX_SOF_SIZE 1
282#define MACB_RX_EOF_OFFSET 15
283#define MACB_RX_EOF_SIZE 1
284#define MACB_RX_CFI_OFFSET 16
285#define MACB_RX_CFI_SIZE 1
286#define MACB_RX_VLAN_PRI_OFFSET 17
287#define MACB_RX_VLAN_PRI_SIZE 3
288#define MACB_RX_PRI_TAG_OFFSET 20
289#define MACB_RX_PRI_TAG_SIZE 1
290#define MACB_RX_VLAN_TAG_OFFSET 21
291#define MACB_RX_VLAN_TAG_SIZE 1
292#define MACB_RX_TYPEID_MATCH_OFFSET 22
293#define MACB_RX_TYPEID_MATCH_SIZE 1
294#define MACB_RX_SA4_MATCH_OFFSET 23
295#define MACB_RX_SA4_MATCH_SIZE 1
296#define MACB_RX_SA3_MATCH_OFFSET 24
297#define MACB_RX_SA3_MATCH_SIZE 1
298#define MACB_RX_SA2_MATCH_OFFSET 25
299#define MACB_RX_SA2_MATCH_SIZE 1
300#define MACB_RX_SA1_MATCH_OFFSET 26
301#define MACB_RX_SA1_MATCH_SIZE 1
302#define MACB_RX_EXT_MATCH_OFFSET 28
303#define MACB_RX_EXT_MATCH_SIZE 1
304#define MACB_RX_UHASH_MATCH_OFFSET 29
305#define MACB_RX_UHASH_MATCH_SIZE 1
306#define MACB_RX_MHASH_MATCH_OFFSET 30
307#define MACB_RX_MHASH_MATCH_SIZE 1
308#define MACB_RX_BROADCAST_OFFSET 31
309#define MACB_RX_BROADCAST_SIZE 1
310
311#define MACB_TX_FRMLEN_OFFSET 0
312#define MACB_TX_FRMLEN_SIZE 11
313#define MACB_TX_LAST_OFFSET 15
314#define MACB_TX_LAST_SIZE 1
315#define MACB_TX_NOCRC_OFFSET 16
316#define MACB_TX_NOCRC_SIZE 1
317#define MACB_TX_BUF_EXHAUSTED_OFFSET 27
318#define MACB_TX_BUF_EXHAUSTED_SIZE 1
319#define MACB_TX_UNDERRUN_OFFSET 28
320#define MACB_TX_UNDERRUN_SIZE 1
321#define MACB_TX_ERROR_OFFSET 29
322#define MACB_TX_ERROR_SIZE 1
323#define MACB_TX_WRAP_OFFSET 30
324#define MACB_TX_WRAP_SIZE 1
325#define MACB_TX_USED_OFFSET 31
326#define MACB_TX_USED_SIZE 1
327
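/*
 * Illustrative sketch: a single-fragment transmit descriptor is filled
 * in much as macb_start_xmit() does, e.g.
 *
 *	desc->addr = mapping;
 *	desc->ctrl = MACB_BF(TX_FRMLEN, skb->len) | MACB_BIT(TX_LAST);
 *	if (entry == TX_RING_SIZE - 1)
 *		desc->ctrl |= MACB_BIT(TX_WRAP);
 *
 * where "desc", "mapping" and "entry" stand for a ring slot, its
 * DMA-mapped buffer address and its ring index; the controller sets
 * TX_USED in ctrl once it has consumed the buffer.
 */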
328struct ring_info {
329 struct sk_buff *skb;
330 dma_addr_t mapping;
331};
332
333/*
334 * Hardware-collected statistics. Used when updating the network
335 * device stats in macb_get_stats().
336 */
337struct macb_stats {
338 u32 rx_pause_frames;
339 u32 tx_ok;
340 u32 tx_single_cols;
341 u32 tx_multiple_cols;
342 u32 rx_ok;
343 u32 rx_fcs_errors;
344 u32 rx_align_errors;
345 u32 tx_deferred;
346 u32 tx_late_cols;
347 u32 tx_excessive_cols;
348 u32 tx_underruns;
349 u32 tx_carrier_errors;
350 u32 rx_resource_errors;
351 u32 rx_overruns;
352 u32 rx_symbol_errors;
353 u32 rx_oversize_pkts;
354 u32 rx_jabbers;
355 u32 rx_undersize_pkts;
356 u32 sqe_test_errors;
357 u32 rx_length_mismatch;
358 u32 tx_pause_frames;
359};
360
361struct macb {
362 void __iomem *regs;
363
364 unsigned int rx_tail;
365 struct dma_desc *rx_ring;
366 void *rx_buffers;
367
368 unsigned int tx_head, tx_tail;
369 struct dma_desc *tx_ring;
370 struct ring_info *tx_skb;
371
372 spinlock_t lock;
373 struct platform_device *pdev;
374 struct clk *pclk;
375 struct clk *hclk;
376 struct net_device *dev;
377 struct napi_struct napi;
378 struct net_device_stats stats;
379 struct macb_stats hw_stats;
380
381 dma_addr_t rx_ring_dma;
382 dma_addr_t tx_ring_dma;
383 dma_addr_t rx_buffers_dma;
384
385 unsigned int rx_pending, tx_pending;
386
387 struct mii_bus *mii_bus;
388 struct phy_device *phy_dev;
389 unsigned int link;
390 unsigned int speed;
391 unsigned int duplex;
392};
393
394#endif /* _MACB_H */