aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/arm
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/arm
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/net/arm')
-rw-r--r--drivers/net/arm/Kconfig74
-rw-r--r--drivers/net/arm/Makefile14
-rw-r--r--drivers/net/arm/am79c961a.c770
-rw-r--r--drivers/net/arm/am79c961a.h145
-rw-r--r--drivers/net/arm/at91_ether.c1254
-rw-r--r--drivers/net/arm/at91_ether.h109
-rw-r--r--drivers/net/arm/ep93xx_eth.c904
-rw-r--r--drivers/net/arm/ether1.c1094
-rw-r--r--drivers/net/arm/ether1.h280
-rw-r--r--drivers/net/arm/ether3.c918
-rw-r--r--drivers/net/arm/ether3.h176
-rw-r--r--drivers/net/arm/etherh.c866
-rw-r--r--drivers/net/arm/ixp4xx_eth.c1489
-rw-r--r--drivers/net/arm/ks8695net.c1656
-rw-r--r--drivers/net/arm/ks8695net.h107
-rw-r--r--drivers/net/arm/w90p910_ether.c1123
16 files changed, 10979 insertions, 0 deletions
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
new file mode 100644
index 00000000000..39e1c0d3947
--- /dev/null
+++ b/drivers/net/arm/Kconfig
@@ -0,0 +1,74 @@
1#
2# Acorn Network device configuration
3# These are for Acorn's Expansion card network interfaces
4#
5config ARM_AM79C961A
6 bool "ARM EBSA110 AM79C961A support"
7 depends on ARM && ARCH_EBSA110
8 select CRC32
9 help
10 If you wish to compile a kernel for the EBSA-110, then you should
11 always answer Y to this.
12
13config ARM_ETHER1
14 tristate "Acorn Ether1 support"
15 depends on ARM && ARCH_ACORN
16 help
17 If you have an Acorn system with one of these (AKA25) network cards,
18 you should say Y to this option if you wish to use it with Linux.
19
20config ARM_ETHER3
21 tristate "Acorn/ANT Ether3 support"
22 depends on ARM && ARCH_ACORN
23 help
24 If you have an Acorn system with one of these network cards, you
25 should say Y to this option if you wish to use it with Linux.
26
27config ARM_ETHERH
28 tristate "I-cubed EtherH/ANT EtherM support"
29 depends on ARM && ARCH_ACORN
30 select CRC32
31 help
32 If you have an Acorn system with one of these network cards, you
33 should say Y to this option if you wish to use it with Linux.
34
35config ARM_AT91_ETHER
36 tristate "AT91RM9200 Ethernet support"
37 depends on ARM && ARCH_AT91RM9200
38 select MII
39 help
40 If you wish to compile a kernel for the AT91RM9200 and enable
41 ethernet support, then you should always answer Y to this.
42
43config ARM_KS8695_ETHER
44 tristate "KS8695 Ethernet support"
45 depends on ARM && ARCH_KS8695
46 select MII
47 help
48 If you wish to compile a kernel for the KS8695 and want to
49 use the internal ethernet then you should answer Y to this.
50
51config EP93XX_ETH
52 tristate "EP93xx Ethernet support"
53 depends on ARM && ARCH_EP93XX
54 select MII
55 help
56 This is a driver for the ethernet hardware included in EP93xx CPUs.
57 Say Y if you are building a kernel for EP93xx based devices.
58
59config IXP4XX_ETH
60 tristate "Intel IXP4xx Ethernet support"
61 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
62 select PHYLIB
63 help
64 Say Y here if you want to use built-in Ethernet ports
65 on IXP4xx processor.
66
67config W90P910_ETH
68 tristate "Nuvoton w90p910 Ethernet support"
69 depends on ARM && ARCH_W90X900
70 select PHYLIB
71 select MII
72 help
73 Say Y here if you want to use built-in Ethernet ports
74 on w90p910 processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
new file mode 100644
index 00000000000..303171f589e
--- /dev/null
+++ b/drivers/net/arm/Makefile
@@ -0,0 +1,14 @@
1# File: drivers/net/arm/Makefile
2#
3# Makefile for the ARM network device drivers
4#
5
6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
7obj-$(CONFIG_ARM_ETHERH) += etherh.o
8obj-$(CONFIG_ARM_ETHER3) += ether3.o
9obj-$(CONFIG_ARM_ETHER1) += ether1.o
10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
11obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
12obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
13obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
14obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
new file mode 100644
index 00000000000..3b1416e3d21
--- /dev/null
+++ b/drivers/net/arm/am79c961a.c
@@ -0,0 +1,770 @@
1/*
2 * linux/drivers/net/am79c961.c
3 *
4 * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Derived from various things including skeleton.c
11 *
12 * This is a special driver for the am79c961A Lance chip used in the
13 * Intel (formally Digital Equipment Corp) EBSA110 platform. Please
14 * note that this can not be built as a module (it doesn't make sense).
15 */
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/interrupt.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/crc32.h>
28#include <linux/bitops.h>
29#include <linux/platform_device.h>
30#include <linux/io.h>
31
32#include <mach/hardware.h>
33#include <asm/system.h>
34
35#define TX_BUFFERS 15
36#define RX_BUFFERS 25
37
38#include "am79c961a.h"
39
40static irqreturn_t
41am79c961_interrupt (int irq, void *dev_id);
42
43static unsigned int net_debug = NET_DEBUG;
44
45static const char version[] =
46 "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
47
48/* --------------------------------------------------------------------------- */
49
50#ifdef __arm__
51static void write_rreg(u_long base, u_int reg, u_int val)
52{
53 asm volatile(
54 "str%?h %1, [%2] @ NET_RAP\n\t"
55 "str%?h %0, [%2, #-4] @ NET_RDP"
56 :
57 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
58}
59
60static inline unsigned short read_rreg(u_long base_addr, u_int reg)
61{
62 unsigned short v;
63 asm volatile(
64 "str%?h %1, [%2] @ NET_RAP\n\t"
65 "ldr%?h %0, [%2, #-4] @ NET_RDP"
66 : "=r" (v)
67 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
68 return v;
69}
70
71static inline void write_ireg(u_long base, u_int reg, u_int val)
72{
73 asm volatile(
74 "str%?h %1, [%2] @ NET_RAP\n\t"
75 "str%?h %0, [%2, #8] @ NET_IDP"
76 :
77 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
78}
79
80static inline unsigned short read_ireg(u_long base_addr, u_int reg)
81{
82 u_short v;
83 asm volatile(
84 "str%?h %1, [%2] @ NAT_RAP\n\t"
85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
86 : "=r" (v)
87 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
88 return v;
89}
90
91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
93
94static void
95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
96{
97 offset = ISAMEM_BASE + (offset << 1);
98 length = (length + 1) & ~1;
99 if ((int)buf & 2) {
100 asm volatile("str%?h %2, [%0], #4"
101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
102 buf += 2;
103 length -= 2;
104 }
105 while (length > 8) {
106 register unsigned int tmp asm("r2"), tmp2 asm("r3");
107 asm volatile(
108 "ldm%?ia %0!, {%1, %2}"
109 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
110 length -= 8;
111 asm volatile(
112 "str%?h %1, [%0], #4\n\t"
113 "mov%? %1, %1, lsr #16\n\t"
114 "str%?h %1, [%0], #4\n\t"
115 "str%?h %2, [%0], #4\n\t"
116 "mov%? %2, %2, lsr #16\n\t"
117 "str%?h %2, [%0], #4"
118 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
119 }
120 while (length > 0) {
121 asm volatile("str%?h %2, [%0], #4"
122 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
123 buf += 2;
124 length -= 2;
125 }
126}
127
128static void
129am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
130{
131 offset = ISAMEM_BASE + (offset << 1);
132 length = (length + 1) & ~1;
133 if ((int)buf & 2) {
134 unsigned int tmp;
135 asm volatile(
136 "ldr%?h %2, [%0], #4\n\t"
137 "str%?b %2, [%1], #1\n\t"
138 "mov%? %2, %2, lsr #8\n\t"
139 "str%?b %2, [%1], #1"
140 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
141 length -= 2;
142 }
143 while (length > 8) {
144 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
145 asm volatile(
146 "ldr%?h %2, [%0], #4\n\t"
147 "ldr%?h %4, [%0], #4\n\t"
148 "ldr%?h %3, [%0], #4\n\t"
149 "orr%? %2, %2, %4, lsl #16\n\t"
150 "ldr%?h %4, [%0], #4\n\t"
151 "orr%? %3, %3, %4, lsl #16\n\t"
152 "stm%?ia %1!, {%2, %3}"
153 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
154 : "0" (offset), "1" (buf));
155 length -= 8;
156 }
157 while (length > 0) {
158 unsigned int tmp;
159 asm volatile(
160 "ldr%?h %2, [%0], #4\n\t"
161 "str%?b %2, [%1], #1\n\t"
162 "mov%? %2, %2, lsr #8\n\t"
163 "str%?b %2, [%1], #1"
164 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
165 length -= 2;
166 }
167}
168#else
169#error Not compatible
170#endif
171
172static int
173am79c961_ramtest(struct net_device *dev, unsigned int val)
174{
175 unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
176 int i, error = 0, errorcount = 0;
177
178 if (!buffer)
179 return 0;
180 memset (buffer, val, 65536);
181 am_writebuffer(dev, 0, buffer, 65536);
182 memset (buffer, val ^ 255, 65536);
183 am_readbuffer(dev, 0, buffer, 65536);
184 for (i = 0; i < 65536; i++) {
185 if (buffer[i] != val && !error) {
186 printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
187 error = 1;
188 errorcount ++;
189 } else if (error && buffer[i] == val) {
190 printk ("%05X\n", i);
191 error = 0;
192 }
193 }
194 if (error)
195 printk ("10000\n");
196 kfree (buffer);
197 return errorcount;
198}
199
200static void am79c961_mc_hash(char *addr, u16 *hash)
201{
202 int idx, bit;
203 u32 crc;
204
205 crc = ether_crc_le(ETH_ALEN, addr);
206
207 idx = crc >> 30;
208 bit = (crc >> 26) & 15;
209
210 hash[idx] |= 1 << bit;
211}
212
213static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
214{
215 unsigned int mode = MODE_PORT_10BT;
216
217 if (dev->flags & IFF_PROMISC) {
218 mode |= MODE_PROMISC;
219 memset(hash, 0xff, 4 * sizeof(*hash));
220 } else if (dev->flags & IFF_ALLMULTI) {
221 memset(hash, 0xff, 4 * sizeof(*hash));
222 } else {
223 struct netdev_hw_addr *ha;
224
225 memset(hash, 0, 4 * sizeof(*hash));
226
227 netdev_for_each_mc_addr(ha, dev)
228 am79c961_mc_hash(ha->addr, hash);
229 }
230
231 return mode;
232}
233
234static void
235am79c961_init_for_open(struct net_device *dev)
236{
237 struct dev_priv *priv = netdev_priv(dev);
238 unsigned long flags;
239 unsigned char *p;
240 u_int hdr_addr, first_free_addr;
241 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
242 int i;
243
244 /*
245 * Stop the chip.
246 */
247 spin_lock_irqsave(&priv->chip_lock, flags);
248 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
249 spin_unlock_irqrestore(&priv->chip_lock, flags);
250
251 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
252 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
253 write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
254 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
255
256 for (i = LADRL; i <= LADRH; i++)
257 write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
258
259 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
260 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
261
262 write_rreg (dev->base_addr, MODE, mode);
263 write_rreg (dev->base_addr, POLLINT, 0);
264 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
265 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
266
267 first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
268 hdr_addr = 0;
269
270 priv->rxhead = 0;
271 priv->rxtail = 0;
272 priv->rxhdr = hdr_addr;
273
274 for (i = 0; i < RX_BUFFERS; i++) {
275 priv->rxbuffer[i] = first_free_addr;
276 am_writeword (dev, hdr_addr, first_free_addr);
277 am_writeword (dev, hdr_addr + 2, RMD_OWN);
278 am_writeword (dev, hdr_addr + 4, (-1600));
279 am_writeword (dev, hdr_addr + 6, 0);
280 first_free_addr += 1600;
281 hdr_addr += 8;
282 }
283 priv->txhead = 0;
284 priv->txtail = 0;
285 priv->txhdr = hdr_addr;
286 for (i = 0; i < TX_BUFFERS; i++) {
287 priv->txbuffer[i] = first_free_addr;
288 am_writeword (dev, hdr_addr, first_free_addr);
289 am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
290 am_writeword (dev, hdr_addr + 4, 0xf000);
291 am_writeword (dev, hdr_addr + 6, 0);
292 first_free_addr += 1600;
293 hdr_addr += 8;
294 }
295
296 write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
297 write_rreg (dev->base_addr, BASERXH, 0);
298 write_rreg (dev->base_addr, BASETXL, priv->txhdr);
299 write_rreg (dev->base_addr, BASERXH, 0);
300 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
301 write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
302 write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
303 write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
304}
305
306static void am79c961_timer(unsigned long data)
307{
308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier;
311 unsigned long flags;
312
313 spin_lock_irqsave(&priv->chip_lock, flags);
314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
316 carrier = netif_carrier_ok(dev);
317
318 if (lnkstat && !carrier) {
319 netif_carrier_on(dev);
320 printk("%s: link up\n", dev->name);
321 } else if (!lnkstat && carrier) {
322 netif_carrier_off(dev);
323 printk("%s: link down\n", dev->name);
324 }
325
326 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
327}
328
329/*
330 * Open/initialize the board.
331 */
332static int
333am79c961_open(struct net_device *dev)
334{
335 struct dev_priv *priv = netdev_priv(dev);
336 int ret;
337
338 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
339 if (ret)
340 return ret;
341
342 am79c961_init_for_open(dev);
343
344 netif_carrier_off(dev);
345
346 priv->timer.expires = jiffies;
347 add_timer(&priv->timer);
348
349 netif_start_queue(dev);
350
351 return 0;
352}
353
354/*
355 * The inverse routine to am79c961_open().
356 */
357static int
358am79c961_close(struct net_device *dev)
359{
360 struct dev_priv *priv = netdev_priv(dev);
361 unsigned long flags;
362
363 del_timer_sync(&priv->timer);
364
365 netif_stop_queue(dev);
366 netif_carrier_off(dev);
367
368 spin_lock_irqsave(&priv->chip_lock, flags);
369 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
370 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
371 spin_unlock_irqrestore(&priv->chip_lock, flags);
372
373 free_irq (dev->irq, dev);
374
375 return 0;
376}
377
378/*
379 * Set or clear promiscuous/multicast mode filter for this adapter.
380 */
381static void am79c961_setmulticastlist (struct net_device *dev)
382{
383 struct dev_priv *priv = netdev_priv(dev);
384 unsigned long flags;
385 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
386 int i, stopped;
387
388 spin_lock_irqsave(&priv->chip_lock, flags);
389
390 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
391
392 if (!stopped) {
393 /*
394 * Put the chip into suspend mode
395 */
396 write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
397
398 /*
399 * Spin waiting for chip to report suspend mode
400 */
401 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
402 spin_unlock_irqrestore(&priv->chip_lock, flags);
403 nop();
404 spin_lock_irqsave(&priv->chip_lock, flags);
405 }
406 }
407
408 /*
409 * Update the multicast hash table
410 */
411 for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
412 write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
413
414 /*
415 * Write the mode register
416 */
417 write_rreg(dev->base_addr, MODE, mode);
418
419 if (!stopped) {
420 /*
421 * Put the chip back into running mode
422 */
423 write_rreg(dev->base_addr, CTRL1, 0);
424 }
425
426 spin_unlock_irqrestore(&priv->chip_lock, flags);
427}
428
429static void am79c961_timeout(struct net_device *dev)
430{
431 printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
432 dev->name);
433
434 /*
435 * ought to do some setup of the tx side here
436 */
437
438 netif_wake_queue(dev);
439}
440
441/*
442 * Transmit a packet
443 */
444static int
445am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
446{
447 struct dev_priv *priv = netdev_priv(dev);
448 unsigned int hdraddr, bufaddr;
449 unsigned int head;
450 unsigned long flags;
451
452 head = priv->txhead;
453 hdraddr = priv->txhdr + (head << 3);
454 bufaddr = priv->txbuffer[head];
455 head += 1;
456 if (head >= TX_BUFFERS)
457 head = 0;
458
459 am_writebuffer (dev, bufaddr, skb->data, skb->len);
460 am_writeword (dev, hdraddr + 4, -skb->len);
461 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
462 priv->txhead = head;
463
464 spin_lock_irqsave(&priv->chip_lock, flags);
465 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
466 spin_unlock_irqrestore(&priv->chip_lock, flags);
467
468 /*
469 * If the next packet is owned by the ethernet device,
470 * then the tx ring is full and we can't add another
471 * packet.
472 */
473 if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
474 netif_stop_queue(dev);
475
476 dev_kfree_skb(skb);
477
478 return NETDEV_TX_OK;
479}
480
481/*
482 * If we have a good packet(s), get it/them out of the buffers.
483 */
484static void
485am79c961_rx(struct net_device *dev, struct dev_priv *priv)
486{
487 do {
488 struct sk_buff *skb;
489 u_int hdraddr;
490 u_int pktaddr;
491 u_int status;
492 int len;
493
494 hdraddr = priv->rxhdr + (priv->rxtail << 3);
495 pktaddr = priv->rxbuffer[priv->rxtail];
496
497 status = am_readword (dev, hdraddr + 2);
498 if (status & RMD_OWN) /* do we own it? */
499 break;
500
501 priv->rxtail ++;
502 if (priv->rxtail >= RX_BUFFERS)
503 priv->rxtail = 0;
504
505 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
506 am_writeword (dev, hdraddr + 2, RMD_OWN);
507 dev->stats.rx_errors++;
508 if (status & RMD_ERR) {
509 if (status & RMD_FRAM)
510 dev->stats.rx_frame_errors++;
511 if (status & RMD_CRC)
512 dev->stats.rx_crc_errors++;
513 } else if (status & RMD_STP)
514 dev->stats.rx_length_errors++;
515 continue;
516 }
517
518 len = am_readword(dev, hdraddr + 6);
519 skb = dev_alloc_skb(len + 2);
520
521 if (skb) {
522 skb_reserve(skb, 2);
523
524 am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
525 am_writeword(dev, hdraddr + 2, RMD_OWN);
526 skb->protocol = eth_type_trans(skb, dev);
527 netif_rx(skb);
528 dev->stats.rx_bytes += len;
529 dev->stats.rx_packets++;
530 } else {
531 am_writeword (dev, hdraddr + 2, RMD_OWN);
532 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
533 dev->stats.rx_dropped++;
534 break;
535 }
536 } while (1);
537}
538
539/*
540 * Update stats for the transmitted packet
541 */
542static void
543am79c961_tx(struct net_device *dev, struct dev_priv *priv)
544{
545 do {
546 short len;
547 u_int hdraddr;
548 u_int status;
549
550 hdraddr = priv->txhdr + (priv->txtail << 3);
551 status = am_readword (dev, hdraddr + 2);
552 if (status & TMD_OWN)
553 break;
554
555 priv->txtail ++;
556 if (priv->txtail >= TX_BUFFERS)
557 priv->txtail = 0;
558
559 if (status & TMD_ERR) {
560 u_int status2;
561
562 dev->stats.tx_errors++;
563
564 status2 = am_readword (dev, hdraddr + 6);
565
566 /*
567 * Clear the error byte
568 */
569 am_writeword (dev, hdraddr + 6, 0);
570
571 if (status2 & TST_RTRY)
572 dev->stats.collisions += 16;
573 if (status2 & TST_LCOL)
574 dev->stats.tx_window_errors++;
575 if (status2 & TST_LCAR)
576 dev->stats.tx_carrier_errors++;
577 if (status2 & TST_UFLO)
578 dev->stats.tx_fifo_errors++;
579 continue;
580 }
581 dev->stats.tx_packets++;
582 len = am_readword (dev, hdraddr + 4);
583 dev->stats.tx_bytes += -len;
584 } while (priv->txtail != priv->txhead);
585
586 netif_wake_queue(dev);
587}
588
589static irqreturn_t
590am79c961_interrupt(int irq, void *dev_id)
591{
592 struct net_device *dev = (struct net_device *)dev_id;
593 struct dev_priv *priv = netdev_priv(dev);
594 u_int status, n = 100;
595 int handled = 0;
596
597 do {
598 status = read_rreg(dev->base_addr, CSR0);
599 write_rreg(dev->base_addr, CSR0, status &
600 (CSR0_IENA|CSR0_TINT|CSR0_RINT|
601 CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
602
603 if (status & CSR0_RINT) {
604 handled = 1;
605 am79c961_rx(dev, priv);
606 }
607 if (status & CSR0_TINT) {
608 handled = 1;
609 am79c961_tx(dev, priv);
610 }
611 if (status & CSR0_MISS) {
612 handled = 1;
613 dev->stats.rx_dropped++;
614 }
615 if (status & CSR0_CERR) {
616 handled = 1;
617 mod_timer(&priv->timer, jiffies);
618 }
619 } while (--n && status & (CSR0_RINT | CSR0_TINT));
620
621 return IRQ_RETVAL(handled);
622}
623
624#ifdef CONFIG_NET_POLL_CONTROLLER
625static void am79c961_poll_controller(struct net_device *dev)
626{
627 unsigned long flags;
628 local_irq_save(flags);
629 am79c961_interrupt(dev->irq, dev);
630 local_irq_restore(flags);
631}
632#endif
633
634/*
635 * Initialise the chip. Note that we always expect
636 * to be entered with interrupts enabled.
637 */
638static int
639am79c961_hw_init(struct net_device *dev)
640{
641 struct dev_priv *priv = netdev_priv(dev);
642
643 spin_lock_irq(&priv->chip_lock);
644 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
645 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
646 spin_unlock_irq(&priv->chip_lock);
647
648 am79c961_ramtest(dev, 0x66);
649 am79c961_ramtest(dev, 0x99);
650
651 return 0;
652}
653
654static void __init am79c961_banner(void)
655{
656 static unsigned version_printed;
657
658 if (net_debug && version_printed++ == 0)
659 printk(KERN_INFO "%s", version);
660}
661static const struct net_device_ops am79c961_netdev_ops = {
662 .ndo_open = am79c961_open,
663 .ndo_stop = am79c961_close,
664 .ndo_start_xmit = am79c961_sendpacket,
665 .ndo_set_multicast_list = am79c961_setmulticastlist,
666 .ndo_tx_timeout = am79c961_timeout,
667 .ndo_validate_addr = eth_validate_addr,
668 .ndo_change_mtu = eth_change_mtu,
669 .ndo_set_mac_address = eth_mac_addr,
670#ifdef CONFIG_NET_POLL_CONTROLLER
671 .ndo_poll_controller = am79c961_poll_controller,
672#endif
673};
674
675static int __devinit am79c961_probe(struct platform_device *pdev)
676{
677 struct resource *res;
678 struct net_device *dev;
679 struct dev_priv *priv;
680 int i, ret;
681
682 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
683 if (!res)
684 return -ENODEV;
685
686 dev = alloc_etherdev(sizeof(struct dev_priv));
687 ret = -ENOMEM;
688 if (!dev)
689 goto out;
690
691 SET_NETDEV_DEV(dev, &pdev->dev);
692
693 priv = netdev_priv(dev);
694
695 /*
696 * Fixed address and IRQ lines here.
697 * The PNP initialisation should have been
698 * done by the ether bootp loader.
699 */
700 dev->base_addr = res->start;
701 ret = platform_get_irq(pdev, 0);
702
703 if (ret < 0) {
704 ret = -ENODEV;
705 goto nodev;
706 }
707 dev->irq = ret;
708
709 ret = -ENODEV;
710 if (!request_region(dev->base_addr, 0x18, dev->name))
711 goto nodev;
712
713 /*
714 * Reset the device.
715 */
716 inb(dev->base_addr + NET_RESET);
717 udelay(5);
718
719 /*
720 * Check the manufacturer part of the
721 * ether address.
722 */
723 if (inb(dev->base_addr) != 0x08 ||
724 inb(dev->base_addr + 2) != 0x00 ||
725 inb(dev->base_addr + 4) != 0x2b)
726 goto release;
727
728 for (i = 0; i < 6; i++)
729 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
730
731 am79c961_banner();
732
733 spin_lock_init(&priv->chip_lock);
734 init_timer(&priv->timer);
735 priv->timer.data = (unsigned long)dev;
736 priv->timer.function = am79c961_timer;
737
738 if (am79c961_hw_init(dev))
739 goto release;
740
741 dev->netdev_ops = &am79c961_netdev_ops;
742
743 ret = register_netdev(dev);
744 if (ret == 0) {
745 printk(KERN_INFO "%s: ether address %pM\n",
746 dev->name, dev->dev_addr);
747 return 0;
748 }
749
750release:
751 release_region(dev->base_addr, 0x18);
752nodev:
753 free_netdev(dev);
754out:
755 return ret;
756}
757
758static struct platform_driver am79c961_driver = {
759 .probe = am79c961_probe,
760 .driver = {
761 .name = "am79c961",
762 },
763};
764
765static int __init am79c961_init(void)
766{
767 return platform_driver_register(&am79c961_driver);
768}
769
770__initcall(am79c961_init);
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
new file mode 100644
index 00000000000..fd634d32756
--- /dev/null
+++ b/drivers/net/arm/am79c961a.h
@@ -0,0 +1,145 @@
1/*
2 * linux/drivers/net/arm/am79c961a.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _LINUX_am79c961a_H
10#define _LINUX_am79c961a_H
11
12/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
13#define DEBUG_TX 2
14#define DEBUG_RX 4
15#define DEBUG_INT 8
16#define DEBUG_IC 16
17#ifndef NET_DEBUG
18#define NET_DEBUG 0
19#endif
20
21#define NET_UID 0
22#define NET_RDP 0x10
23#define NET_RAP 0x12
24#define NET_RESET 0x14
25#define NET_IDP 0x16
26
27/*
28 * RAP registers
29 */
30#define CSR0 0
31#define CSR0_INIT 0x0001
32#define CSR0_STRT 0x0002
33#define CSR0_STOP 0x0004
34#define CSR0_TDMD 0x0008
35#define CSR0_TXON 0x0010
36#define CSR0_RXON 0x0020
37#define CSR0_IENA 0x0040
38#define CSR0_INTR 0x0080
39#define CSR0_IDON 0x0100
40#define CSR0_TINT 0x0200
41#define CSR0_RINT 0x0400
42#define CSR0_MERR 0x0800
43#define CSR0_MISS 0x1000
44#define CSR0_CERR 0x2000
45#define CSR0_BABL 0x4000
46#define CSR0_ERR 0x8000
47
48#define CSR3 3
49#define CSR3_EMBA 0x0008
50#define CSR3_DXMT2PD 0x0010
51#define CSR3_LAPPEN 0x0020
52#define CSR3_DXSUFLO 0x0040
53#define CSR3_IDONM 0x0100
54#define CSR3_TINTM 0x0200
55#define CSR3_RINTM 0x0400
56#define CSR3_MERRM 0x0800
57#define CSR3_MISSM 0x1000
58#define CSR3_BABLM 0x4000
59#define CSR3_MASKALL 0x5F00
60
61#define CSR4 4
62#define CSR4_JABM 0x0001
63#define CSR4_JAB 0x0002
64#define CSR4_TXSTRTM 0x0004
65#define CSR4_TXSTRT 0x0008
66#define CSR4_RCVCCOM 0x0010
67#define CSR4_RCVCCO 0x0020
68#define CSR4_MFCOM 0x0100
69#define CSR4_MFCO 0x0200
70#define CSR4_ASTRP_RCV 0x0400
71#define CSR4_APAD_XMIT 0x0800
72
73#define CTRL1 5
74#define CTRL1_SPND 0x0001
75
76#define LADRL 8
77#define LADRM1 9
78#define LADRM2 10
79#define LADRH 11
80#define PADRL 12
81#define PADRM 13
82#define PADRH 14
83
84#define MODE 15
85#define MODE_DISRX 0x0001
86#define MODE_DISTX 0x0002
87#define MODE_LOOP 0x0004
88#define MODE_DTCRC 0x0008
89#define MODE_COLL 0x0010
90#define MODE_DRETRY 0x0020
91#define MODE_INTLOOP 0x0040
92#define MODE_PORT_AUI 0x0000
93#define MODE_PORT_10BT 0x0080
94#define MODE_DRXPA 0x2000
95#define MODE_DRXBA 0x4000
96#define MODE_PROMISC 0x8000
97
98#define BASERXL 24
99#define BASERXH 25
100#define BASETXL 30
101#define BASETXH 31
102
103#define POLLINT 47
104
105#define SIZERXR 76
106#define SIZETXR 78
107
108#define CSR_MFC 112
109
110#define RMD_ENP 0x0100
111#define RMD_STP 0x0200
112#define RMD_CRC 0x0800
113#define RMD_FRAM 0x2000
114#define RMD_ERR 0x4000
115#define RMD_OWN 0x8000
116
117#define TMD_ENP 0x0100
118#define TMD_STP 0x0200
119#define TMD_MORE 0x1000
120#define TMD_ERR 0x4000
121#define TMD_OWN 0x8000
122
123#define TST_RTRY 0x0400
124#define TST_LCAR 0x0800
125#define TST_LCOL 0x1000
126#define TST_UFLO 0x4000
127#define TST_BUFF 0x8000
128
129#define ISALED0 0x0004
130#define ISALED0_LNKST 0x8000
131
132struct dev_priv {
133 unsigned long rxbuffer[RX_BUFFERS];
134 unsigned long txbuffer[TX_BUFFERS];
135 unsigned char txhead;
136 unsigned char txtail;
137 unsigned char rxhead;
138 unsigned char rxtail;
139 unsigned long rxhdr;
140 unsigned long txhdr;
141 spinlock_t chip_lock;
142 struct timer_list timer;
143};
144
145#endif
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
new file mode 100644
index 00000000000..29dc43523ce
--- /dev/null
+++ b/drivers/net/arm/at91_ether.c
@@ -0,0 +1,1254 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) 2003 SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson 01/11/2003
8 *
9 * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
10 * (Polaroid Corporation)
11 *
12 * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/mii.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/dma-mapping.h>
28#include <linux/ethtool.h>
29#include <linux/platform_device.h>
30#include <linux/clk.h>
31#include <linux/gfp.h>
32
33#include <asm/io.h>
34#include <asm/uaccess.h>
35#include <asm/mach-types.h>
36
37#include <mach/at91rm9200_emac.h>
38#include <mach/gpio.h>
39#include <mach/board.h>
40
41#include "at91_ether.h"
42
43#define DRV_NAME "at91_ether"
44#define DRV_VERSION "1.0"
45
46#define LINK_POLL_INTERVAL (HZ)
47
48/* ..................................................................... */
49
50/*
51 * Read from a EMAC register.
52 */
53static inline unsigned long at91_emac_read(unsigned int reg)
54{
55 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
56
57 return __raw_readl(emac_base + reg);
58}
59
60/*
61 * Write to a EMAC register.
62 */
63static inline void at91_emac_write(unsigned int reg, unsigned long value)
64{
65 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
66
67 __raw_writel(value, emac_base + reg);
68}
69
70/* ........................... PHY INTERFACE ........................... */
71
72/*
73 * Enable the MDIO bit in MAC control register
74 * When not called from an interrupt-handler, access to the PHY must be
75 * protected by a spinlock.
76 */
77static void enable_mdi(void)
78{
79 unsigned long ctl;
80
81 ctl = at91_emac_read(AT91_EMAC_CTL);
82 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
83}
84
85/*
86 * Disable the MDIO bit in the MAC control register
87 */
88static void disable_mdi(void)
89{
90 unsigned long ctl;
91
92 ctl = at91_emac_read(AT91_EMAC_CTL);
93 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
94}
95
96/*
97 * Wait until the PHY operation is complete.
98 */
99static inline void at91_phy_wait(void) {
100 unsigned long timeout = jiffies + 2;
101
102 while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
103 if (time_after(jiffies, timeout)) {
104 printk("at91_ether: MIO timeout\n");
105 break;
106 }
107 cpu_relax();
108 }
109}
110
/*
 * Write @value to PHY register @address on the PHY at @phy_addr.
 *
 * The write is performed by composing a single IEEE 802.3 clause-22
 * management frame in the EMAC_MAN register: opcode (write), 5-bit PHY
 * address at bit 23, 5-bit register address at bit 18, 16-bit data in
 * the low bits.
 *
 * Note: MDI interface is assumed to already have been enabled
 * (see enable_mdi()).
 */
static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value)
{
	at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
		| ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));

	/* Wait until IDLE bit in Network Status register is cleared */
	at91_phy_wait();
}
123
/*
 * Read PHY register @address on the PHY at @phy_addr into *@value.
 *
 * Issues a clause-22 read management frame via EMAC_MAN, waits for the
 * shift operation to finish, then extracts the 16 data bits from the
 * same register.
 *
 * Note: MDI interface is assumed to already have been enabled
 * (see enable_mdi()).
 */
static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value)
{
	at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
		| ((phy_addr & 0x1f) << 23) | (address << 18));

	/* Wait until IDLE bit in Network Status register is cleared */
	at91_phy_wait();

	*value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA;
}
138
139/* ........................... PHY MANAGEMENT .......................... */
140
/*
 * Access the PHY to determine the current link speed and mode, and update the
 * MAC accordingly.
 * If no link or auto-negotiation is busy, then no changes are made.
 *
 * @silent: if non-zero, suppress the link-state printk (used by the
 *          periodic poll in at91ether_check_link()).
 *
 * Callers enable the MDI port (and, outside IRQ context, hold lp->lock)
 * before invoking this — see at91ether_open() and at91ether_check_link().
 */
static void update_linkspeed(struct net_device *dev, int silent)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned int bmsr, bmcr, lpa, mac_cfg;
	unsigned int speed, duplex;

	if (!mii_link_ok(&lp->mii)) {		/* no link */
		netif_carrier_off(dev);
		if (!silent)
			printk(KERN_INFO "%s: Link down.\n", dev->name);
		return;
	}

	/* Link up, or auto-negotiation still in progress */
	read_phy(lp->phy_address, MII_BMSR, &bmsr);
	read_phy(lp->phy_address, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {		/* AutoNegotiation is enabled */
		if (!(bmsr & BMSR_ANEGCOMPLETE))
			return;			/* Do nothing - another interrupt generated when negotiation complete */

		/* Derive speed/duplex from the link-partner ability word */
		read_phy(lp->phy_address, MII_LPA, &lpa);
		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
		else speed = SPEED_10;
		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
		else duplex = DUPLEX_HALF;
	} else {
		/* Autonegotiation off: use the forced settings from BMCR */
		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	}

	/* Update the MAC: clear then re-set the speed/duplex config bits */
	mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
	if (speed == SPEED_100) {
		if (duplex == DUPLEX_FULL)	/* 100 Full Duplex */
			mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
		else				/* 100 Half Duplex */
			mac_cfg |= AT91_EMAC_SPD;
	} else {
		if (duplex == DUPLEX_FULL)	/* 10 Full Duplex */
			mac_cfg |= AT91_EMAC_FD;
		else {}				/* 10 Half Duplex: both bits stay clear */
	}
	at91_emac_write(AT91_EMAC_CFG, mac_cfg);

	if (!silent)
		printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
	netif_carrier_on(dev);
}
194
/*
 * Handle interrupts from the PHY.
 *
 * Reads the PHY-specific interrupt-status register (which also acks the
 * interrupt on these chips) and, if an event was really pending, refreshes
 * the MAC speed/duplex settings via update_linkspeed().
 */
static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct at91_private *lp = netdev_priv(dev);
	unsigned int phy;

	/*
	 * This handler is triggered on both edges, but the PHY chips expect
	 * level-triggering. We therefore have to check if the PHY actually has
	 * an IRQ pending.
	 */
	enable_mdi();
	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
		read_phy(lp->phy_address, MII_DSINTR_REG, &phy);	/* ack interrupt in Davicom PHY */
		if (!(phy & (1 << 0)))
			goto done;
	}
	else if (lp->phy_type == MII_LXT971A_ID) {
		read_phy(lp->phy_address, MII_ISINTS_REG, &phy);	/* ack interrupt in Intel PHY */
		if (!(phy & (1 << 2)))
			goto done;
	}
	else if (lp->phy_type == MII_BCM5221_ID) {
		read_phy(lp->phy_address, MII_BCMINTR_REG, &phy);	/* ack interrupt in Broadcom PHY */
		if (!(phy & (1 << 0)))
			goto done;
	}
	else if (lp->phy_type == MII_KS8721_ID) {
		read_phy(lp->phy_address, MII_TPISTATUS, &phy);		/* ack interrupt in Micrel PHY */
		if (!(phy & ((1 << 2) | 1)))
			goto done;
	}
	else if (lp->phy_type == MII_T78Q21x3_ID) {			/* ack interrupt in Teridian PHY */
		read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy);
		if (!(phy & ((1 << 2) | 1)))
			goto done;
	}
	else if (lp->phy_type == MII_DP83848_ID) {
		read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy);	/* ack interrupt in DP83848 PHY */
		if (!(phy & (1 << 7)))
			goto done;
	}

	update_linkspeed(dev, 0);

done:
	disable_mdi();

	return IRQ_HANDLED;
}
248
/*
 * Initialize and enable the PHY interrupt for link-state changes.
 *
 * If the board has no PHY IRQ pin wired up, falls back to polling the
 * link state with lp->check_timer (see at91ether_check_link()).
 * Otherwise requests the IRQ and enables the link-change interrupt
 * sources in the PHY's vendor-specific interrupt-enable register(s).
 */
static void enable_phyirq(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned int dsintr, irq_number;
	int status;

	irq_number = lp->board_data.phy_irq_pin;
	if (!irq_number) {
		/*
		 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
		 * or board does not have it connected.
		 */
		mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
		return;
	}

	status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
	if (status) {
		printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
		return;
	}

	/* Serialize against other MDIO users while programming the PHY */
	spin_lock_irq(&lp->lock);
	enable_mdi();

	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
		read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
		dsintr = dsintr & ~0xf00;		/* clear bits 8..11 */
		write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
	}
	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
		read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
		dsintr = dsintr | 0xf2;			/* set bits 1, 4..7 */
		write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
	}
	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
		dsintr = (1 << 15) | ( 1 << 14);
		write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
	}
	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
		dsintr = (1 << 10) | ( 1 << 8);
		write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
	}
	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
		read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
		dsintr = dsintr | 0x500;		/* set bits 8, 10 */
		write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
	}
	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
		read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
		dsintr = dsintr | 0x3c;			/* set bits 2..5 */
		write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
		read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
		dsintr = dsintr | 0x3;			/* set bits 0,1 */
		write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
	}

	disable_mdi();
	spin_unlock_irq(&lp->lock);
}
312
/*
 * Disable the PHY interrupt.
 *
 * Counterpart of enable_phyirq(): if link state was being polled, stop
 * the timer; otherwise mask the vendor-specific interrupt sources in
 * the PHY and release the IRQ line.
 */
static void disable_phyirq(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned int dsintr;
	unsigned int irq_number;

	irq_number = lp->board_data.phy_irq_pin;
	if (!irq_number) {
		/* No IRQ pin: we were polling instead — stop the timer */
		del_timer_sync(&lp->check_timer);
		return;
	}

	spin_lock_irq(&lp->lock);
	enable_mdi();

	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
		read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
		dsintr = dsintr | 0xf00;		/* set bits 8..11 */
		write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
	}
	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
		read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
		dsintr = dsintr & ~0xf2;		/* clear bits 1, 4..7 */
		write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
	}
	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
		read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr);
		dsintr = ~(1 << 14);
		write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
	}
	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
		read_phy(lp->phy_address, MII_TPISTATUS, &dsintr);
		dsintr = ~((1 << 10) | (1 << 8));
		write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
	}
	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
		read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
		dsintr = dsintr & ~0x500;		/* clear bits 8, 10 */
		write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
	}
	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
		read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
		dsintr = dsintr & ~0x3;			/* clear bits 0, 1 */
		write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
		read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
		dsintr = dsintr & ~0x3c;		/* clear bits 2..5 */
		write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
	}

	disable_mdi();
	spin_unlock_irq(&lp->lock);

	free_irq(irq_number, dev);			/* Free interrupt handler */
}
370
/*
 * Perform a software reset of the PHY via BMCR_RESET.
 * (Compiled out — kept for reference.)
 */
#if 0
static void reset_phy(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned int bmcr;

	spin_lock_irq(&lp->lock);
	enable_mdi();

	/* Perform PHY reset */
	write_phy(lp->phy_address, MII_BMCR, BMCR_RESET);

	/*
	 * Wait until PHY reset is complete.  Per IEEE 802.3 clause 22,
	 * BMCR_RESET is self-clearing: the PHY clears the bit when the
	 * reset has finished, so poll while it is STILL SET.  (The
	 * original loop waited for the bit to become set, which is the
	 * inverse of the required condition.)
	 */
	do {
		read_phy(lp->phy_address, MII_BMCR, &bmcr);
	} while (bmcr & BMCR_RESET);

	disable_mdi();
	spin_unlock_irq(&lp->lock);
}
#endif
395
396static void at91ether_check_link(unsigned long dev_id)
397{
398 struct net_device *dev = (struct net_device *) dev_id;
399 struct at91_private *lp = netdev_priv(dev);
400
401 enable_mdi();
402 update_linkspeed(dev, 1);
403 disable_mdi();
404
405 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
406}
407
408/* ......................... ADDRESS MANAGEMENT ........................ */
409
410/*
411 * NOTE: Your bootloader must always set the MAC address correctly before
412 * booting into Linux.
413 *
414 * - It must always set the MAC address after reset, even if it doesn't
415 * happen to access the Ethernet while it's booting. Some versions of
416 * U-Boot on the AT91RM9200-DK do not do this.
417 *
418 * - Likewise it must store the addresses in the correct byte order.
419 * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
420 * continues to do so, for bug-compatibility).
421 */
422
423static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
424{
425 char addr[6];
426
427 if (machine_is_csb337()) {
428 addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
429 addr[4] = (lo & 0xff00) >> 8;
430 addr[3] = (lo & 0xff0000) >> 16;
431 addr[2] = (lo & 0xff000000) >> 24;
432 addr[1] = (hi & 0xff);
433 addr[0] = (hi & 0xff00) >> 8;
434 }
435 else {
436 addr[0] = (lo & 0xff);
437 addr[1] = (lo & 0xff00) >> 8;
438 addr[2] = (lo & 0xff0000) >> 16;
439 addr[3] = (lo & 0xff000000) >> 24;
440 addr[4] = (hi & 0xff);
441 addr[5] = (hi & 0xff00) >> 8;
442 }
443
444 if (is_valid_ether_addr(addr)) {
445 memcpy(dev->dev_addr, &addr, 6);
446 return 1;
447 }
448 return 0;
449}
450
451/*
452 * Set the ethernet MAC address in dev->dev_addr
453 */
454static void __init get_mac_address(struct net_device *dev)
455{
456 /* Check Specific-Address 1 */
457 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L)))
458 return;
459 /* Check Specific-Address 2 */
460 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L)))
461 return;
462 /* Check Specific-Address 3 */
463 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L)))
464 return;
465 /* Check Specific-Address 4 */
466 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L)))
467 return;
468
469 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
470}
471
472/*
473 * Program the hardware MAC address from dev->dev_addr.
474 */
475static void update_mac_address(struct net_device *dev)
476{
477 at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
478 at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
479
480 at91_emac_write(AT91_EMAC_SA2L, 0);
481 at91_emac_write(AT91_EMAC_SA2H, 0);
482}
483
484/*
485 * Store the new hardware address in dev->dev_addr, and update the MAC.
486 */
487static int set_mac_address(struct net_device *dev, void* addr)
488{
489 struct sockaddr *address = addr;
490
491 if (!is_valid_ether_addr(address->sa_data))
492 return -EADDRNOTAVAIL;
493
494 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
495 update_mac_address(dev);
496
497 printk("%s: Setting MAC address to %pM\n", dev->name,
498 dev->dev_addr);
499
500 return 0;
501}
502
503static int inline hash_bit_value(int bitnr, __u8 *addr)
504{
505 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
506 return 1;
507 return 0;
508}
509
510/*
511 * The hash address register is 64 bits long and takes up two locations in the memory map.
512 * The least significant bits are stored in EMAC_HSL and the most significant
513 * bits in EMAC_HSH.
514 *
515 * The unicast hash enable and the multicast hash enable bits in the network configuration
516 * register enable the reception of hash matched frames. The destination address is
517 * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
518 * The hash function is an exclusive or of every sixth bit of the destination address.
519 * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
520 * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
521 * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
522 * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
523 * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
524 * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
525 * da[0] represents the least significant bit of the first byte received, that is, the multicast/
526 * unicast indicator, and da[47] represents the most significant bit of the last byte
527 * received.
528 * If the hash index points to a bit that is set in the hash register then the frame will be
529 * matched according to whether the frame is multicast or unicast.
530 * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
531 * the hash index points to a bit set in the hash register.
532 * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
533 * hash index points to a bit set in the hash register.
534 * To receive all multicast frames, the hash register should be set with all ones and the
535 * multicast hash enable bit should be set in the network configuration register.
536 */
537
538/*
539 * Return the hash index value for the specified address.
540 */
541static int hash_get_index(__u8 *addr)
542{
543 int i, j, bitval;
544 int hash_index = 0;
545
546 for (j = 0; j < 6; j++) {
547 for (i = 0, bitval = 0; i < 8; i++)
548 bitval ^= hash_bit_value(i*6 + j, addr);
549
550 hash_index |= (bitval << j);
551 }
552
553 return hash_index;
554}
555
556/*
557 * Add multicast addresses to the internal multicast-hash table.
558 */
559static void at91ether_sethashtable(struct net_device *dev)
560{
561 struct netdev_hw_addr *ha;
562 unsigned long mc_filter[2];
563 unsigned int bitnr;
564
565 mc_filter[0] = mc_filter[1] = 0;
566
567 netdev_for_each_mc_addr(ha, dev) {
568 bitnr = hash_get_index(ha->addr);
569 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
570 }
571
572 at91_emac_write(AT91_EMAC_HSL, mc_filter[0]);
573 at91_emac_write(AT91_EMAC_HSH, mc_filter[1]);
574}
575
576/*
577 * Enable/Disable promiscuous and multicast modes.
578 */
579static void at91ether_set_multicast_list(struct net_device *dev)
580{
581 unsigned long cfg;
582
583 cfg = at91_emac_read(AT91_EMAC_CFG);
584
585 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
586 cfg |= AT91_EMAC_CAF;
587 else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
588 cfg &= ~AT91_EMAC_CAF;
589
590 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
591 at91_emac_write(AT91_EMAC_HSH, -1);
592 at91_emac_write(AT91_EMAC_HSL, -1);
593 cfg |= AT91_EMAC_MTI;
594 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
595 at91ether_sethashtable(dev);
596 cfg |= AT91_EMAC_MTI;
597 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
598 at91_emac_write(AT91_EMAC_HSH, 0);
599 at91_emac_write(AT91_EMAC_HSL, 0);
600 cfg &= ~AT91_EMAC_MTI;
601 }
602
603 at91_emac_write(AT91_EMAC_CFG, cfg);
604}
605
606/* ......................... ETHTOOL SUPPORT ........................... */
607
/* MDIO read callback for the generic mii library. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	unsigned int val;

	read_phy(phy_id, location, &val);
	return val;
}
615
/* MDIO write callback for the generic mii library. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	write_phy(phy_id, location, value);
}
620
621static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
622{
623 struct at91_private *lp = netdev_priv(dev);
624 int ret;
625
626 spin_lock_irq(&lp->lock);
627 enable_mdi();
628
629 ret = mii_ethtool_gset(&lp->mii, cmd);
630
631 disable_mdi();
632 spin_unlock_irq(&lp->lock);
633
634 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
635 cmd->supported = SUPPORTED_FIBRE;
636 cmd->port = PORT_FIBRE;
637 }
638
639 return ret;
640}
641
642static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
643{
644 struct at91_private *lp = netdev_priv(dev);
645 int ret;
646
647 spin_lock_irq(&lp->lock);
648 enable_mdi();
649
650 ret = mii_ethtool_sset(&lp->mii, cmd);
651
652 disable_mdi();
653 spin_unlock_irq(&lp->lock);
654
655 return ret;
656}
657
658static int at91ether_nwayreset(struct net_device *dev)
659{
660 struct at91_private *lp = netdev_priv(dev);
661 int ret;
662
663 spin_lock_irq(&lp->lock);
664 enable_mdi();
665
666 ret = mii_nway_restart(&lp->mii);
667
668 disable_mdi();
669 spin_unlock_irq(&lp->lock);
670
671 return ret;
672}
673
674static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
675{
676 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
677 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
678 strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
679}
680
/* ethtool operations table wired into the net_device in at91ether_setup(). */
static const struct ethtool_ops at91ether_ethtool_ops = {
	.get_settings	= at91ether_get_settings,
	.set_settings	= at91ether_set_settings,
	.get_drvinfo	= at91ether_get_drvinfo,
	.nway_reset	= at91ether_nwayreset,
	.get_link	= ethtool_op_get_link,
};
688
689static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
690{
691 struct at91_private *lp = netdev_priv(dev);
692 int res;
693
694 if (!netif_running(dev))
695 return -EINVAL;
696
697 spin_lock_irq(&lp->lock);
698 enable_mdi();
699 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
700 disable_mdi();
701 spin_unlock_irq(&lp->lock);
702
703 return res;
704}
705
706/* ................................ MAC ................................ */
707
/*
 * Initialize and start the Receiver and Transmit subsystems.
 *
 * Rebuilds the receive descriptor ring (each descriptor points at its
 * DMA-visible buffer in lp->dlist_phys), hands the ring to the EMAC via
 * the Rx Buffer Queue Pointer register, and enables Rx/Tx in the
 * control register.
 */
static void at91ether_start(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	struct recv_desc_bufs *dlist, *dlist_phys;
	int i;
	unsigned long ctl;

	dlist = lp->dlist;
	dlist_phys = lp->dlist_phys;

	for (i = 0; i < MAX_RX_DESCR; i++) {
		/* descriptor addr field takes the DMA (physical) buffer address */
		dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
		dlist->descriptors[i].size = 0;
	}

	/* Set the Wrap bit on the last descriptor */
	dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;

	/* Reset buffer index */
	lp->rxBuffIndex = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys);

	/* Enable Receive and Transmit */
	ctl = at91_emac_read(AT91_EMAC_CTL);
	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
}
739
/*
 * Open the ethernet interface.
 *
 * Re-enables the peripheral clock, clears the hardware statistics,
 * reprograms the MAC address, arms the PHY link-change notification
 * (IRQ or poll timer), unmasks the MAC interrupts this driver handles,
 * seeds the current link speed, and finally starts the Rx/Tx engines
 * and the transmit queue.
 */
static int at91ether_open(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned long ctl;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	clk_enable(lp->ether_clk);		/* Re-enable Peripheral clock */

	/* Clear internal statistics */
	ctl = at91_emac_read(AT91_EMAC_CTL);
	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);

	/* Update the MAC address (incase user has changed it) */
	update_mac_address(dev);

	/* Enable PHY interrupt */
	enable_phyirq(dev);

	/* Enable MAC interrupts */
	at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
				| AT91_EMAC_ROVR | AT91_EMAC_ABT);

	/* Determine current link speed */
	spin_lock_irq(&lp->lock);
	enable_mdi();
	update_linkspeed(dev, 0);
	disable_mdi();
	spin_unlock_irq(&lp->lock);

	at91ether_start(dev);
	netif_start_queue(dev);
	return 0;
}
779
/*
 * Close the interface: stop Rx/Tx, tear down the PHY notification,
 * mask the MAC interrupts, halt the queue, and gate the peripheral
 * clock to save power.
 */
static int at91ether_close(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	unsigned long ctl;

	/* Disable Receiver and Transmitter */
	ctl = at91_emac_read(AT91_EMAC_CTL);
	at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));

	/* Disable PHY interrupt */
	disable_phyirq(dev);

	/* Disable MAC interrupts */
	at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
				| AT91_EMAC_ROVR | AT91_EMAC_ABT);

	netif_stop_queue(dev);

	clk_disable(lp->ether_clk);		/* Disable Peripheral clock */

	return 0;
}
806
/*
 * Transmit packet.
 *
 * The EMAC has a single transmit buffer, so the queue is stopped for
 * every packet and only re-woken from the TCOM (transmit complete)
 * interrupt in at91ether_interrupt().  The skb is kept in lp->skb and
 * freed there once the DMA has finished.
 */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);

	if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
		dev->stats.tx_bytes += skb->len;

		/* Set address of the data in the Transmit Address register */
		at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		at91_emac_write(AT91_EMAC_TCR, skb->len);

	} else {
		/* BNQ clear: a frame is already queued — should not happen
		 * because the queue is stopped while one is in flight */
		printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
		return NETDEV_TX_BUSY;	/* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
				on this skb, he also reports -ENETDOWN and printk's, so either
				we free and return(0) or don't free and return 1 */
	}

	return NETDEV_TX_OK;
}
837
/*
 * Update the current statistics from the internal statistics registers
 * and return them.
 *
 * The EMAC statistics registers are read here and accumulated into
 * dev->stats; only done while the interface is running because the
 * peripheral clock (and thus register access) is gated when closed.
 */
static struct net_device_stats *at91ether_stats(struct net_device *dev)
{
	int ale, lenerr, seqe, lcol, ecol;

	if (netif_running(dev)) {
		dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK);		/* Good frames received */
		ale = at91_emac_read(AT91_EMAC_ALE);
		dev->stats.rx_frame_errors += ale;				/* Alignment errors */
		lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
		dev->stats.rx_length_errors += lenerr;				/* Excessive Length or Undersize Frame error */
		seqe = at91_emac_read(AT91_EMAC_SEQE);
		dev->stats.rx_crc_errors += seqe;				/* CRC error */
		dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC);	/* Receive buffer not available */
		dev->stats.rx_errors += (ale + lenerr + seqe
			+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));

		dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA);		/* Frames successfully transmitted */
		dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE);	/* Transmit FIFO underruns */
		dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE);	/* Carrier Sense errors */
		dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */

		lcol = at91_emac_read(AT91_EMAC_LCOL);
		ecol = at91_emac_read(AT91_EMAC_ECOL);
		dev->stats.tx_window_errors += lcol;			/* Late collisions */
		dev->stats.tx_aborted_errors += ecol;			/* 16 collisions */

		dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
	}
	return &dev->stats;
}
871
/*
 * Extract received frames from the buffer descriptors and send them to
 * the upper layers.  (Called from interrupt context.)
 *
 * Walks the descriptor ring from lp->rxBuffIndex while the DONE
 * (ownership) bit is set, copies each frame into a freshly allocated
 * skb, clears the ownership bit to hand the descriptor back to the
 * EMAC, and advances the ring index with wrap-around.
 */
static void at91ether_rx(struct net_device *dev)
{
	struct at91_private *lp = netdev_priv(dev);
	struct recv_desc_bufs *dlist;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	dlist = lp->dlist;
	while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
		p_recv = dlist->recv_buf[lp->rxBuffIndex];
		pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff;	/* Length of frame including FCS */
		skb = dev_alloc_skb(pktlen + 2);
		if (skb != NULL) {
			skb_reserve(skb, 2);	/* align IP header to 16 bytes */
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		}
		else {
			dev->stats.rx_dropped += 1;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
		}

		if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
			dev->stats.multicast++;

		dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE;	/* reset ownership bit */
		if (lp->rxBuffIndex == MAX_RX_DESCR-1)		/* wrap after last buffer */
			lp->rxBuffIndex = 0;
		else
			lp->rxBuffIndex++;
	}
}
912
/*
 * MAC interrupt handler.
 *
 * Dispatches on the (clear-on-read) Interrupt Status register:
 * RCOM -> drain the receive ring; TCOM -> release the in-flight Tx skb
 * and wake the queue; RBNA -> apply the Errata #11 receiver-restart
 * workaround; ROVR -> log a receive overrun.
 */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct at91_private *lp = netdev_priv(dev);
	unsigned long intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	   It is automatically cleared once read. */
	intstatus = at91_emac_read(AT91_EMAC_ISR);

	if (intstatus & AT91_EMAC_RCOM)		/* Receive complete */
		at91ether_rx(dev);

	if (intstatus & AT91_EMAC_TCOM) {	/* Transmit complete */
		/* The TCOM bit is set even if the transmission failed. */
		if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
			dev->stats.tx_errors += 1;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
		}
		netif_wake_queue(dev);
	}

	/* Work-around for Errata #11: toggle the receiver enable bit */
	if (intstatus & AT91_EMAC_RBNA) {
		ctl = at91_emac_read(AT91_EMAC_CTL);
		at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
		at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
	}

	if (intstatus & AT91_EMAC_ROVR)
		printk("%s: ROVR error\n", dev->name);

	return IRQ_HANDLED;
}
954
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: run the normal interrupt handler with local interrupts
 * disabled so netconsole/kgdboe can receive packets without IRQs.
 */
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
965
/* net_device operations table installed by at91ether_setup(). */
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= at91ether_stats,
	.ndo_set_multicast_list	= at91ether_set_multicast_list,
	.ndo_set_mac_address	= set_mac_address,
	.ndo_do_ioctl		= at91ether_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};
980
981/*
982 * Initialize the ethernet interface
983 */
984static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
985 struct platform_device *pdev, struct clk *ether_clk)
986{
987 struct at91_eth_data *board_data = pdev->dev.platform_data;
988 struct net_device *dev;
989 struct at91_private *lp;
990 unsigned int val;
991 int res;
992
993 dev = alloc_etherdev(sizeof(struct at91_private));
994 if (!dev)
995 return -ENOMEM;
996
997 dev->base_addr = AT91_VA_BASE_EMAC;
998 dev->irq = AT91RM9200_ID_EMAC;
999
1000 /* Install the interrupt handler */
1001 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
1002 free_netdev(dev);
1003 return -EBUSY;
1004 }
1005
1006 /* Allocate memory for DMA Receive descriptors */
1007 lp = netdev_priv(dev);
1008 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1009 if (lp->dlist == NULL) {
1010 free_irq(dev->irq, dev);
1011 free_netdev(dev);
1012 return -ENOMEM;
1013 }
1014 lp->board_data = *board_data;
1015 lp->ether_clk = ether_clk;
1016 platform_set_drvdata(pdev, dev);
1017
1018 spin_lock_init(&lp->lock);
1019
1020 ether_setup(dev);
1021 dev->netdev_ops = &at91ether_netdev_ops;
1022 dev->ethtool_ops = &at91ether_ethtool_ops;
1023
1024 SET_NETDEV_DEV(dev, &pdev->dev);
1025
1026 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
1027 update_mac_address(dev); /* Program ethernet address into MAC */
1028
1029 at91_emac_write(AT91_EMAC_CTL, 0);
1030
1031 if (lp->board_data.is_rmii)
1032 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
1033 else
1034 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
1035
1036 /* Perform PHY-specific initialization */
1037 spin_lock_irq(&lp->lock);
1038 enable_mdi();
1039 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
1040 read_phy(phy_address, MII_DSCR_REG, &val);
1041 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
1042 lp->phy_media = PORT_FIBRE;
1043 } else if (machine_is_csb337()) {
1044 /* mix link activity status into LED2 link state */
1045 write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
1046 } else if (machine_is_ecbat91())
1047 write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
1048
1049 disable_mdi();
1050 spin_unlock_irq(&lp->lock);
1051
1052 lp->mii.dev = dev; /* Support for ethtool */
1053 lp->mii.mdio_read = mdio_read;
1054 lp->mii.mdio_write = mdio_write;
1055 lp->mii.phy_id = phy_address;
1056 lp->mii.phy_id_mask = 0x1f;
1057 lp->mii.reg_num_mask = 0x1f;
1058
1059 lp->phy_type = phy_type; /* Type of PHY connected */
1060 lp->phy_address = phy_address; /* MDI address of PHY */
1061
1062 /* Register the network interface */
1063 res = register_netdev(dev);
1064 if (res) {
1065 free_irq(dev->irq, dev);
1066 free_netdev(dev);
1067 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1068 return res;
1069 }
1070
1071 /* Determine current link speed */
1072 spin_lock_irq(&lp->lock);
1073 enable_mdi();
1074 update_linkspeed(dev, 0);
1075 disable_mdi();
1076 spin_unlock_irq(&lp->lock);
1077 netif_carrier_off(dev); /* will be enabled in open() */
1078
1079 /* If board has no PHY IRQ, use a timer to poll the PHY */
1080 if (!lp->board_data.phy_irq_pin) {
1081 init_timer(&lp->check_timer);
1082 lp->check_timer.data = (unsigned long)dev;
1083 lp->check_timer.function = at91ether_check_link;
1084 } else if (lp->board_data.phy_irq_pin >= 32)
1085 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1086
1087 /* Display ethernet banner */
1088 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
1089 dev->name, (uint) dev->base_addr, dev->irq,
1090 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1091 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1092 dev->dev_addr);
1093 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1094 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1095 else if (phy_type == MII_LXT971A_ID)
1096 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1097 else if (phy_type == MII_RTL8201_ID)
1098 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1099 else if (phy_type == MII_BCM5221_ID)
1100 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1101 else if (phy_type == MII_DP83847_ID)
1102 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1103 else if (phy_type == MII_DP83848_ID)
1104 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1105 else if (phy_type == MII_AC101L_ID)
1106 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1107 else if (phy_type == MII_KS8721_ID)
1108 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1109 else if (phy_type == MII_T78Q21x3_ID)
1110 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1111 else if (phy_type == MII_LAN83C185_ID)
1112 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1113
1114 return 0;
1115}
1116
1117/*
1118 * Detect MAC and PHY and perform initialization
1119 */
1120static int __init at91ether_probe(struct platform_device *pdev)
1121{
1122 unsigned int phyid1, phyid2;
1123 int detected = -1;
1124 unsigned long phy_id;
1125 unsigned short phy_address = 0;
1126 struct clk *ether_clk;
1127
1128 ether_clk = clk_get(&pdev->dev, "ether_clk");
1129 if (IS_ERR(ether_clk)) {
1130 printk(KERN_ERR "at91_ether: no clock defined\n");
1131 return -ENODEV;
1132 }
1133 clk_enable(ether_clk); /* Enable Peripheral clock */
1134
1135 while ((detected != 0) && (phy_address < 32)) {
1136 /* Read the PHY ID registers */
1137 enable_mdi();
1138 read_phy(phy_address, MII_PHYSID1, &phyid1);
1139 read_phy(phy_address, MII_PHYSID2, &phyid2);
1140 disable_mdi();
1141
1142 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1143 switch (phy_id) {
1144 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1145 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1146 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1147 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1148 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1149 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1150 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1151 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1152 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1153 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1154 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1155 detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
1156 break;
1157 }
1158
1159 phy_address++;
1160 }
1161
1162 clk_disable(ether_clk); /* Disable Peripheral clock */
1163
1164 return detected;
1165}
1166
/*
 * Tear down the interface: release the PHY-IRQ GPIO, unregister the
 * netdev, then free the IRQ, descriptor DMA memory, clock reference
 * and finally the net_device itself.
 */
static int __devexit at91ether_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_private *lp = netdev_priv(dev);

	/* Matches the gpio_request() done in at91ether_setup() */
	if (lp->board_data.phy_irq_pin >= 32)
		gpio_free(lp->board_data.phy_irq_pin);

	unregister_netdev(dev);
	free_irq(dev->irq, dev);
	/* NULL device matches the NULL used at allocation time in at91ether_setup() */
	dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
	clk_put(lp->ether_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}
1184
1185#ifdef CONFIG_PM
1186
1187static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1188{
1189 struct net_device *net_dev = platform_get_drvdata(pdev);
1190 struct at91_private *lp = netdev_priv(net_dev);
1191 int phy_irq = lp->board_data.phy_irq_pin;
1192
1193 if (netif_running(net_dev)) {
1194 if (phy_irq)
1195 disable_irq(phy_irq);
1196
1197 netif_stop_queue(net_dev);
1198 netif_device_detach(net_dev);
1199
1200 clk_disable(lp->ether_clk);
1201 }
1202 return 0;
1203}
1204
1205static int at91ether_resume(struct platform_device *pdev)
1206{
1207 struct net_device *net_dev = platform_get_drvdata(pdev);
1208 struct at91_private *lp = netdev_priv(net_dev);
1209 int phy_irq = lp->board_data.phy_irq_pin;
1210
1211 if (netif_running(net_dev)) {
1212 clk_enable(lp->ether_clk);
1213
1214 netif_device_attach(net_dev);
1215 netif_start_queue(net_dev);
1216
1217 if (phy_irq)
1218 enable_irq(phy_irq);
1219 }
1220 return 0;
1221}
1222
1223#else
1224#define at91ether_suspend NULL
1225#define at91ether_resume NULL
1226#endif
1227
/*
 * Platform driver glue.  There is no .probe member: the driver is bound
 * once via platform_driver_probe(&at91ether_driver, at91ether_probe) in
 * at91ether_init(), which allows the probe routine to live in __init.
 */
static struct platform_driver at91ether_driver = {
	.remove		= __devexit_p(at91ether_remove),
	.suspend	= at91ether_suspend,
	.resume		= at91ether_resume,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1237
/* Module entry point: bind the (non-hotpluggable) EMAC platform device. */
static int __init at91ether_init(void)
{
	return platform_driver_probe(&at91ether_driver, at91ether_probe);
}

/* Module exit point: unregister the platform driver. */
static void __exit at91ether_exit(void)
{
	platform_driver_unregister(&at91ether_driver);
}
1247
1248module_init(at91ether_init)
1249module_exit(at91ether_exit)
1250
1251MODULE_LICENSE("GPL");
1252MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
1253MODULE_AUTHOR("Andrew Victor");
1254MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/arm/at91_ether.h
new file mode 100644
index 00000000000..353f4dab62b
--- /dev/null
+++ b/drivers/net/arm/at91_ether.h
@@ -0,0 +1,109 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
#ifndef AT91_ETHERNET
#define AT91_ETHERNET


/*
 * Supported PHY identifiers (OUI + model from MII_PHYSID1/2, with the
 * revision nibble masked off) and the vendor-specific register numbers
 * the driver touches on each part.
 */

/* Davicom 9161 PHY */
#define MII_DM9161_ID		0x0181b880
#define MII_DM9161A_ID		0x0181b8a0
#define MII_DSCR_REG		16
#define MII_DSCSR_REG		17
#define MII_DSINTR_REG		21

/* Intel LXT971A PHY */
#define MII_LXT971A_ID		0x001378E0
#define MII_ISINTE_REG		18
#define MII_ISINTS_REG		19
#define MII_LEDCTRL_REG		20

/* Realtek RTL8201 PHY */
#define MII_RTL8201_ID		0x00008200

/* Broadcom BCM5221 PHY */
#define MII_BCM5221_ID		0x004061e0
#define MII_BCMINTR_REG		26

/* National Semiconductor DP83847 */
#define MII_DP83847_ID		0x20005c30

/* National Semiconductor DP83848 */
#define MII_DP83848_ID		0x20005c90
#define MII_DPPHYSTS_REG	16
#define MII_DPMICR_REG		17
#define MII_DPMISR_REG		18

/* Altima AC101L PHY */
#define MII_AC101L_ID		0x00225520

/* Micrel KS8721 PHY */
#define MII_KS8721_ID		0x00221610

/* Teridian 78Q2123/78Q2133 */
#define MII_T78Q21x3_ID		0x000e7230
#define MII_T78Q21INT_REG	17

/* SMSC LAN83C185 */
#define MII_LAN83C185_ID	0x0007C0A0

/* ........................................................................ */

#define MAX_RBUFF_SZ	0x600		/* 1518 rounded up */
#define MAX_RX_DESCR	9		/* max number of receive buffers */

#define EMAC_DESC_DONE	0x00000001	/* bit for if DMA is done */
#define EMAC_DESC_WRAP	0x00000002	/* bit for wrap */

#define EMAC_BROADCAST	0x80000000	/* broadcast address */
#define EMAC_MULTICAST	0x40000000	/* multicast address */
#define EMAC_UNICAST	0x20000000	/* unicast address */
72
/* One EMAC receive descriptor: DMA buffer address word + status/size word. */
struct rbf_t
{
	unsigned int addr;
	unsigned long size;
};

/*
 * Receive descriptor ring and its packet buffers, allocated together
 * as one coherent DMA block (see at91ether_setup()).
 */
struct recv_desc_bufs
{
	struct rbf_t descriptors[MAX_RX_DESCR];		/* must be on sizeof (rbf_t) boundary */
	char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ];	/* must be on long boundary */
};

/* Per-interface driver state, stored in netdev_priv(). */
struct at91_private
{
	struct mii_if_info mii;			/* ethtool support */
	struct at91_eth_data board_data;	/* board-specific configuration */
	struct clk *ether_clk;			/* clock */

	/* PHY */
	unsigned long phy_type;			/* type of PHY (PHY_ID) */
	spinlock_t lock;			/* lock for MDI interface */
	short phy_media;			/* media interface type */
	unsigned short phy_address;		/* 5-bit MDI address of PHY (0..31) */
	struct timer_list check_timer;		/* Poll link status */

	/* Transmit */
	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
	int skb_length;				/* saved skb length for pci_unmap_single */

	/* Receive */
	int rxBuffIndex;			/* index into receive descriptor list */
	struct recv_desc_bufs *dlist;		/* descriptor list address */
	struct recv_desc_bufs *dlist_phys;	/* descriptor list physical address */
};

#endif
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
new file mode 100644
index 00000000000..4317af8d2f0
--- /dev/null
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -0,0 +1,904 @@
1/*
2 * EP93xx ethernet network device driver
3 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
13
14#include <linux/dma-mapping.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/mii.h>
19#include <linux/etherdevice.h>
20#include <linux/ethtool.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/moduleparam.h>
24#include <linux/platform_device.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/slab.h>
28
29#include <mach/hardware.h>
30
#define DRV_MODULE_NAME		"ep93xx-eth"
#define DRV_MODULE_VERSION	"0.1"

#define RX_QUEUE_ENTRIES	64	/* ring sizes; both must be powers of two */
#define TX_QUEUE_ENTRIES	8

#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

/* EP93xx MAC register offsets and bit definitions. */
#define REG_RXCTL		0x0000
#define REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define REG_TXCTL_ENABLE	0x00000001
#define REG_MIICMD		0x0010
#define REG_MIICMD_READ		0x00008000
#define REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define REG_MIISTS_BUSY		0x00000001
#define REG_SELFCTL		0x0020
#define REG_SELFCTL_RESET	0x00000001
#define REG_INTEN		0x0024
#define REG_INTEN_TX		0x00000008
#define REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define REG_INTSTS_TX		0x00000008
#define REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
#define REG_AFP			0x004c
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
#define REG_GIINTMSK		0x0064
#define REG_GIINTMSK_ENABLE	0x00008000
#define REG_BMCTL		0x0080
#define REG_BMCTL_ENABLE_TX	0x00000100
#define REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define REG_BMSTS_RX_ACTIVE	0x00000008
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8
89
/* Receive descriptor as consumed by the MAC's buffer manager. */
struct ep93xx_rdesc
{
	u32 buf_addr;
	u32 rdesc1;
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

/* Receive status entry written back by the MAC for each frame. */
struct ep93xx_rstat
{
	u32 rstat0;
	u32 rstat1;
};

#define RSTAT0_RFP		0x80000000
#define RSTAT0_RWE		0x40000000
#define RSTAT0_EOF		0x20000000
#define RSTAT0_EOB		0x10000000
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000
#define RSTAT0_FE		0x00080000
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000
#define RSTAT0_CRCI		0x00008000
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

/* Transmit descriptor handed to the MAC. */
struct ep93xx_tdesc
{
	u32 buf_addr;
	u32 tdesc1;
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff

/* Transmit status entry written back by the MAC per completed frame. */
struct ep93xx_tstat
{
	u32 tstat0;
};

#define TSTAT0_TXFP		0x80000000
#define TSTAT0_TXWE		0x40000000
#define TSTAT0_FA		0x20000000
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000
#define TSTAT0_TXU		0x02000000
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000
#define TSTAT0_BUFFER_INDEX	0x00007fff

/*
 * All four hardware rings, allocated as a single coherent DMA block
 * (see ep93xx_alloc_buffers()).
 */
struct ep93xx_descs
{
	struct ep93xx_rdesc rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat tstat[TX_QUEUE_ENTRIES];
};

/* Per-interface driver state, stored in netdev_priv(). */
struct ep93xx_priv
{
	struct resource *res;
	void __iomem *base_addr;
	int irq;

	struct ep93xx_descs *descs;
	dma_addr_t descs_dma_addr;

	void *rx_buf[RX_QUEUE_ENTRIES];
	void *tx_buf[TX_QUEUE_ENTRIES];

	spinlock_t rx_lock;
	unsigned int rx_pointer;
	unsigned int tx_clean_pointer;
	unsigned int tx_pointer;
	spinlock_t tx_pending_lock;
	unsigned int tx_pending;

	struct net_device *dev;
	struct napi_struct napi;

	struct mii_if_info mii;
	u8 mdc_divisor;
};
182
/* Byte/word/long MMIO accessors relative to the MAC's mapped base. */
#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))
189
/*
 * Read a PHY register over the MII management interface.
 *
 * Issues the read command and polls the busy bit (up to ~10ms in 1ms
 * sleeps); returns the data word, or 0xffff on timeout.
 */
static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int data;
	int i;

	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_info("mdio read timed out\n");
		data = 0xffff;	/* all-ones, as if no PHY responded */
	} else {
		data = rdl(ep, REG_MIIDATA);
	}

	return data;
}
213
/*
 * Write a PHY register over the MII management interface.
 *
 * Loads the data register, issues the write command and polls the busy
 * bit (up to ~10ms); a timeout is only logged — the write may be lost.
 */
static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_MIIDATA, data);
	wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_info("mdio write timed out\n");
}
231
/*
 * Receive up to (budget - processed) frames from the RX status ring.
 *
 * Called from NAPI poll.  Each completed frame is copied out of its
 * pre-mapped DMA buffer into a freshly allocated skb and passed to the
 * stack; the ring slot is then recycled in place.  Returns the updated
 * processed-frame count (the caller re-enqueues the slots to hardware).
 */
static int ep93xx_rx(struct net_device *dev, int processed, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	while (processed < budget) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		/* Both RFP bits must be set before the status entry is valid */
		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
			break;

		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		/* Sanity checks: one buffer per frame, matching ring slot */
		if (!(rstat0 & RSTAT0_EOF))
			pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);

		if (!(rstat0 & RSTAT0_RWE)) {
			/* Receive-without-error bit clear: classify the error */
			dev->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				dev->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				dev->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				dev->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				dev->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS. */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		skb = dev_alloc_skb(length + 2);
		if (likely(skb != NULL)) {
			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
			skb_reserve(skb, 2);	/* align IP header */
			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			dma_sync_single_for_device(dev->dev.parent,
						   rxd->buf_addr, length,
						   DMA_FROM_DEVICE);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
		} else {
			dev->stats.rx_dropped++;
		}

err:
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}
313
314static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
315{
316 struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
317 return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
318}
319
/*
 * NAPI poll callback: drain the RX ring, then complete NAPI and
 * re-enable RX interrupts.  The ring is re-checked under rx_lock after
 * completion; if a frame slipped in meanwhile, RX interrupts are masked
 * again and polling is rescheduled rather than losing the event.
 */
static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx = 0;

poll_some_more:
	rx = ep93xx_rx(dev, rx, budget);
	if (rx < budget) {
		int more = 0;

		spin_lock_irq(&ep->rx_lock);
		__napi_complete(napi);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		if (ep93xx_have_more_rx(ep)) {
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
			more = 1;
		}
		spin_unlock_irq(&ep->rx_lock);

		if (more && napi_reschedule(napi))
			goto poll_some_more;
	}

	/* Hand the consumed descriptor/status slots back to the hardware */
	if (rx) {
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}
352
/*
 * Queue one frame for transmission.
 *
 * Frames are copied into pre-allocated, pre-mapped per-slot DMA buffers
 * (no per-packet mapping), so the skb is consumed immediately.  The
 * queue is stopped once all TX_QUEUE_ENTRIES slots are in flight;
 * ep93xx_tx_complete() wakes it again.
 */
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	/* Oversized frames cannot fit a slot buffer; drop them */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];

	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
				DMA_TO_DEVICE);
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
				   DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	/* Tell the MAC one more descriptor is ready */
	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
388
/*
 * Reap completed entries from the TX status ring, update statistics
 * and wake the queue if it had been stopped because the ring was full.
 * Called from ep93xx_irq() under the tx_pending_lock.
 */
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		/* Frame-processed bit clear: nothing more to reap */
		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			pr_crit("frame aborted %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			pr_crit("entry mismatch %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += length;
		} else {
			dev->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			dev->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			dev->stats.tx_fifo_errors++;
		dev->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		/* Ring was completely full before this entry was reaped */
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
441
/*
 * Interrupt handler (shared line).  RX interrupts are masked and
 * deferred to NAPI; TX completions are reaped inline.
 */
static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	/* Reading INTSTSC returns and clears the pending bits */
	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			/* Mask RX until ep93xx_poll() has drained the ring */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}
466
467static void ep93xx_free_buffers(struct ep93xx_priv *ep)
468{
469 struct device *dev = ep->dev->dev.parent;
470 int i;
471
472 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
473 dma_addr_t d;
474
475 d = ep->descs->rdesc[i].buf_addr;
476 if (d)
477 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
478
479 if (ep->rx_buf[i] != NULL)
480 kfree(ep->rx_buf[i]);
481 }
482
483 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
484 dma_addr_t d;
485
486 d = ep->descs->tdesc[i].buf_addr;
487 if (d)
488 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
489
490 if (ep->tx_buf[i] != NULL)
491 kfree(ep->tx_buf[i]);
492 }
493
494 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
495 ep->descs_dma_addr);
496}
497
/*
 * Allocate the coherent descriptor block plus one pre-mapped packet
 * buffer per RX and TX ring slot.
 *
 * Returns 0 on success, 1 on failure (note: not a negative errno);
 * on failure everything allocated so far is released again.
 */
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
				&ep->descs_dma_addr, GFP_KERNEL);
	if (ep->descs == NULL)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->rx_buf[i] = buf;
		ep->descs->rdesc[i].buf_addr = d;
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->tx_buf[i] = buf;
		ep->descs->tdesc[i].buf_addr = d;
	}

	return 0;

err:
	ep93xx_free_buffers(ep);
	return 1;
}
551
/*
 * Reset and program the MAC: MDC divisor, the four queue base
 * addresses/lengths, station address, address filter and maximum frame
 * length, then enable RX/TX DMA.  Returns 0 on success, 1 if the reset
 * or the receive engine fails to come up within ~10ms.
 */
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	/* Soft-reset the MAC and wait for the bit to self-clear */
	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to reset\n");
		return 1;
	}

	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress? */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	/* Wait for the receive DMA engine to report it is active */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to start\n");
		return 1;
	}

	/* Hand all RX descriptor/status slots to the hardware */
	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}
633
/*
 * Stop the MAC by soft-resetting it, waiting up to ~10ms for the reset
 * bit to self-clear.  A timeout is only logged.
 */
static void ep93xx_stop_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_crit("hw failed to reset\n");
}
649
/*
 * Bring the interface up: allocate rings and buffers, start the MAC,
 * install the (shared) interrupt handler and enable global interrupt
 * generation.  Each failure step unwinds everything done before it.
 */
static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}
687
/*
 * Bring the interface down: mirror of ep93xx_open() — quiesce NAPI and
 * the queue, mask interrupts, release the IRQ, stop the MAC and free
 * all ring/buffer memory.
 */
static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	napi_disable(&ep->napi);
	netif_stop_queue(dev);

	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
702
703static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
704{
705 struct ep93xx_priv *ep = netdev_priv(dev);
706 struct mii_ioctl_data *data = if_mii(ifr);
707
708 return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
709}
710
711static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
712{
713 strcpy(info->driver, DRV_MODULE_NAME);
714 strcpy(info->version, DRV_MODULE_VERSION);
715}
716
/* ethtool: report link settings via the generic MII helper. */
static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	return mii_ethtool_gset(&ep->mii, cmd);
}

/* ethtool: change link settings via the generic MII helper. */
static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	return mii_ethtool_sset(&ep->mii, cmd);
}

/* ethtool: restart PHY autonegotiation. */
static int ep93xx_nway_reset(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	return mii_nway_restart(&ep->mii);
}

/* ethtool: report current link state from the PHY. */
static u32 ep93xx_get_link(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	return mii_link_ok(&ep->mii);
}
740
/* ethtool operations — all backed by the generic MII library. */
static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.get_settings		= ep93xx_get_settings,
	.set_settings		= ep93xx_set_settings,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
};
748
/* net_device operations; MTU and MAC address use the generic ethernet
 * helpers. */
static const struct net_device_ops ep93xx_netdev_ops = {
	.ndo_open		= ep93xx_open,
	.ndo_stop		= ep93xx_close,
	.ndo_start_xmit		= ep93xx_xmit,
	.ndo_do_ioctl		= ep93xx_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
758
759static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
760{
761 struct net_device *dev;
762
763 dev = alloc_etherdev(sizeof(struct ep93xx_priv));
764 if (dev == NULL)
765 return NULL;
766
767 memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
768
769 dev->ethtool_ops = &ep93xx_ethtool_ops;
770 dev->netdev_ops = &ep93xx_netdev_ops;
771
772 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
773
774 return dev;
775}
776
777
/*
 * Platform remove callback.  Also used as the common error-unwind path
 * by ep93xx_eth_probe(), so every teardown step is guarded and the
 * function copes with a partially initialised device.
 */
static int ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return 0;	/* probe failed before drvdata was set */
	platform_set_drvdata(pdev, NULL);

	ep = netdev_priv(dev);

	/* @@@ Force down. */
	unregister_netdev(dev);
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		release_resource(ep->res);
		kfree(ep->res);
	}

	free_netdev(dev);

	return 0;
}
806
807static int ep93xx_eth_probe(struct platform_device *pdev)
808{
809 struct ep93xx_eth_data *data;
810 struct net_device *dev;
811 struct ep93xx_priv *ep;
812 struct resource *mem;
813 int irq;
814 int err;
815
816 if (pdev == NULL)
817 return -ENODEV;
818 data = pdev->dev.platform_data;
819
820 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
821 irq = platform_get_irq(pdev, 0);
822 if (!mem || irq < 0)
823 return -ENXIO;
824
825 dev = ep93xx_dev_alloc(data);
826 if (dev == NULL) {
827 err = -ENOMEM;
828 goto err_out;
829 }
830 ep = netdev_priv(dev);
831 ep->dev = dev;
832 SET_NETDEV_DEV(dev, &pdev->dev);
833 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
834
835 platform_set_drvdata(pdev, dev);
836
837 ep->res = request_mem_region(mem->start, resource_size(mem),
838 dev_name(&pdev->dev));
839 if (ep->res == NULL) {
840 dev_err(&pdev->dev, "Could not reserve memory region\n");
841 err = -ENOMEM;
842 goto err_out;
843 }
844
845 ep->base_addr = ioremap(mem->start, resource_size(mem));
846 if (ep->base_addr == NULL) {
847 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
848 err = -EIO;
849 goto err_out;
850 }
851 ep->irq = irq;
852
853 ep->mii.phy_id = data->phy_id;
854 ep->mii.phy_id_mask = 0x1f;
855 ep->mii.reg_num_mask = 0x1f;
856 ep->mii.dev = dev;
857 ep->mii.mdio_read = ep93xx_mdio_read;
858 ep->mii.mdio_write = ep93xx_mdio_write;
859 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
860
861 if (is_zero_ether_addr(dev->dev_addr))
862 random_ether_addr(dev->dev_addr);
863
864 err = register_netdev(dev);
865 if (err) {
866 dev_err(&pdev->dev, "Failed to register netdev\n");
867 goto err_out;
868 }
869
870 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
871 dev->name, ep->irq, dev->dev_addr);
872
873 return 0;
874
875err_out:
876 ep93xx_eth_remove(pdev);
877 return err;
878}
879
880
/* Platform driver binding; matches devices named "ep93xx-eth". */
static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name	= "ep93xx-eth",
		.owner	= THIS_MODULE,
	},
};
889
/* Module entry point: announce the driver and register it with the
 * platform bus.  Probing happens when a matching device appears. */
static int __init ep93xx_eth_init_module(void)
{
	printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
	return platform_driver_register(&ep93xx_eth_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit ep93xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&ep93xx_eth_driver);
}

module_init(ep93xx_eth_init_module);
module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
new file mode 100644
index 00000000000..b00781c02d5
--- /dev/null
+++ b/drivers/net/arm/ether1.c
@@ -0,0 +1,1094 @@
1/*
2 * linux/drivers/acorn/net/ether1.c
3 *
4 * Copyright (C) 1996-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Acorn ether1 driver (82586 chip) for Acorn machines
11 *
12 * We basically keep two queues in the cards memory - one for transmit
13 * and one for receive. Each has a head and a tail. The head is where
14 * we/the chip adds packets to be transmitted/received, and the tail
15 * is where the transmitter has got to/where the receiver will stop.
16 * Both of these queues are circular, and since the chip is running
17 * all the time, we have to be careful when we modify the pointers etc
18 * so that the buffer memory contents is valid all the time.
19 *
20 * Change log:
21 * 1.00 RMK Released
22 * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now.
23 * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
24 * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
25 * Should prevent lockup.
 * 1.04	RMK	17/09/1997	Added more info when initialisation of chip goes wrong.
27 * TDR now only reports failure when chip reports non-zero
28 * TDR time-distance.
29 * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
30 * 1.06 RMK 10/02/2000 Updated for 2.3.43
31 * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8
32 */
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/fcntl.h>
38#include <linux/interrupt.h>
39#include <linux/ioport.h>
40#include <linux/in.h>
41#include <linux/slab.h>
42#include <linux/string.h>
43#include <linux/errno.h>
44#include <linux/device.h>
45#include <linux/init.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/bitops.h>
50
51#include <asm/system.h>
52#include <asm/io.h>
53#include <asm/dma.h>
54#include <asm/ecard.h>
55
56#define __ETHER1_C
57#include "ether1.h"
58
/* Debug level; compile-time default comes from NET_DEBUG in ether1.h. */
static unsigned int net_debug = NET_DEBUG;

/*
 * Layout of the card's 64K onboard RAM: the 82586 control structures
 * live below TX_AREA_START, the circular transmit area occupies
 * [TX_AREA_START, TX_AREA_END) and the receive frame descriptors and
 * buffers occupy [RX_AREA_START, RX_AREA_END).
 */
#define BUFFER_SIZE	0x10000
#define TX_AREA_START	0x00100
#define TX_AREA_END	0x05000
#define RX_AREA_START	0x05000
#define RX_AREA_END	0x0fc00

static int ether1_open(struct net_device *dev);
static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
static void ether1_timeout(struct net_device *dev);

/* ------------------------------------------------------------------------- */

static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";

#define BUS_16 16
#define BUS_8 8

/* ------------------------------------------------------------------------- */

/* svflgs arguments for ether1_inw_p/ether1_outw_p. */
#define DISABLEIRQS 1
#define NORMALIRQS 0

/* Read/write one 16-bit field of a structure held in card RAM; the
 * (type, offset) pair is turned into a byte offset via a null-pointer
 * offsetof idiom. */
#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)
88
/*
 * Read a 16-bit word from card RAM at byte offset <addr>.  The card
 * exposes its RAM through a 4K page window: the page register selects
 * addr >> 12 and the word is read from the window at the in-page
 * offset.  With svflgs == DISABLEIRQS the page-select/read pair is
 * protected against an interrupt handler changing the page register.
 */
static inline unsigned short
ether1_inw_p (struct net_device *dev, int addr, int svflgs)
{
	unsigned long flags;
	unsigned short ret;

	if (svflgs)
		local_irq_save (flags);

	writeb(addr >> 12, REG_PAGE);
	ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
	if (svflgs)
		local_irq_restore (flags);
	return ret;
}
104
/*
 * Write a 16-bit word to card RAM at byte offset <addr>; counterpart
 * of ether1_inw_p() with the same paging and IRQ-protection semantics.
 */
static inline void
ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
{
	unsigned long flags;

	if (svflgs)
		local_irq_save (flags);

	writeb(addr >> 12, REG_PAGE);
	writew(val, ETHER1_RAM + ((addr & 4095) << 1));
	if (svflgs)
		local_irq_restore (flags);
}
118
119/*
120 * Some inline assembler to allow fast transfers on to/off of the card.
121 * Since this driver depends on some features presented by the ARM
122 * specific architecture, and that you can't configure this driver
123 * without specifiing ARM mode, this is not a problem.
124 *
125 * This routine is essentially an optimised memcpy from the card's
126 * onboard RAM to kernel memory.
127 */
/*
 * Copy <length> bytes from kernel memory <data> into card RAM starting
 * at byte offset <start>.  The copy is split into at-most-4K chunks so
 * the page register can be advanced at each 4K RAM page boundary; the
 * unrolled inline assembly moves two bytes per iteration (words are
 * duplicated into both halves of the 32-bit bus write) and the final
 * ldreqb/streqb pair transfers a trailing odd byte.
 */
static void
ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
{
	unsigned int page, thislen, offset;
	void __iomem *addr;

	offset = start & 4095;
	page = start >> 12;
	addr = ETHER1_RAM + (offset << 1);

	/* First chunk runs only to the end of the current page. */
	if (offset + length > 4096)
		thislen = 4096 - offset;
	else
		thislen = length;

	do {
		int used;

		writeb(page, REG_PAGE);
		length -= thislen;

		__asm__ __volatile__(
	"subs	%3, %3, #2\n\
	bmi	2f\n\
1:	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bpl	1b\n\
2:	adds	%3, %3, #1\n\
	ldreqb	%0, [%1]\n\
	streqb	%0, [%2]"
		: "=&r" (used), "=&r" (data)
		: "r" (addr), "r" (thislen), "1" (data));

		/* Subsequent chunks always start at the window base. */
		addr = ETHER1_RAM;

		thislen = length;
		if (thislen > 4096)
			thislen = 4096;
		page++;
	} while (thislen);
}
190
/*
 * Copy <length> bytes from card RAM at byte offset <start> into kernel
 * memory <data>; the mirror image of ether1_writebuffer(), with the
 * same per-4K-page chunking.  Each 32-bit bus read yields one 16-bit
 * word which is stored to the destination a byte at a time, and the
 * trailing ldreqb/streqb pair handles an odd final byte.
 */
static void
ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
{
	unsigned int page, thislen, offset;
	void __iomem *addr;

	offset = start & 4095;
	page = start >> 12;
	addr = ETHER1_RAM + (offset << 1);

	/* First chunk runs only to the end of the current page. */
	if (offset + length > 4096)
		thislen = 4096 - offset;
	else
		thislen = length;

	do {
		int used;

		writeb(page, REG_PAGE);
		length -= thislen;

		__asm__ __volatile__(
	"subs	%3, %3, #2\n\
	bmi	2f\n\
1:	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bpl	1b\n\
2:	adds	%3, %3, #1\n\
	ldreqb	%0, [%2]\n\
	streqb	%0, [%1]"
		: "=&r" (used), "=&r" (data)
		: "r" (addr), "r" (thislen), "1" (data));

		/* Subsequent chunks always start at the window base. */
		addr = ETHER1_RAM;

		thislen = length;
		if (thislen > 4096)
			thislen = 4096;
		page++;
	} while (thislen);
}
253
254static int __devinit
255ether1_ramtest(struct net_device *dev, unsigned char byte)
256{
257 unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
258 int i, ret = BUFFER_SIZE;
259 int max_errors = 15;
260 int bad = -1;
261 int bad_start = 0;
262
263 if (!buffer)
264 return 1;
265
266 memset (buffer, byte, BUFFER_SIZE);
267 ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE);
268 memset (buffer, byte ^ 0xff, BUFFER_SIZE);
269 ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE);
270
271 for (i = 0; i < BUFFER_SIZE; i++) {
272 if (buffer[i] != byte) {
273 if (max_errors >= 0 && bad != buffer[i]) {
274 if (bad != -1)
275 printk ("\n");
276 printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X",
277 dev->name, buffer[i], byte, i);
278 ret = -ENODEV;
279 max_errors --;
280 bad = buffer[i];
281 bad_start = i;
282 }
283 } else {
284 if (bad != -1) {
285 if (bad_start == i - 1)
286 printk ("\n");
287 else
288 printk (" - 0x%04X\n", i - 1);
289 bad = -1;
290 }
291 }
292 }
293
294 if (bad != -1)
295 printk (" - 0x%04X\n", BUFFER_SIZE);
296 kfree (buffer);
297
298 return ret;
299}
300
/*
 * Put the 82586 into reset (acknowledging any pending interrupt) and
 * report the bus width.  This hardware is always operated 16 bits wide,
 * so BUS_16 is returned unconditionally.
 */
static int
ether1_reset (struct net_device *dev)
{
	writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
	return BUS_16;
}
307
308static int __devinit
309ether1_init_2(struct net_device *dev)
310{
311 int i;
312 dev->mem_start = 0;
313
314 i = ether1_ramtest (dev, 0x5a);
315
316 if (i > 0)
317 i = ether1_ramtest (dev, 0x1e);
318
319 if (i <= 0)
320 return -ENODEV;
321
322 dev->mem_end = i;
323 return 0;
324}
325
326/*
327 * These are the structures that are loaded into the ether RAM card to
328 * initialise the 82586
329 */
330
/*
 * Template structures copied into card RAM by ether1_init_for_open().
 * Together they form the 82586's initial command chain:
 * config -> set-address -> set-multicast -> TDR -> nop, with the scp
 * and iscp at the top of RAM pointing at the scb at offset 0.
 */

/* at 0x0100 */
#define NOP_ADDR	(TX_AREA_START)
#define NOP_SIZE	(0x06)
/* Self-linked nop: terminates the command chain until TX commands are
 * spliced in by ether1_sendpacket(). */
static nop_t  init_nop  = {
	0,
	CMD_NOP,
	NOP_ADDR
};

/* at 0x003a */
#define TDR_ADDR	(0x003a)
#define TDR_SIZE	(0x08)
/* Time-domain reflectometer cable test, interrupts on completion. */
static tdr_t  init_tdr	= {
	0,
	CMD_TDR | CMD_INTR,
	NOP_ADDR,
	0
};

/* at 0x002e */
#define MC_ADDR		(0x002e)
#define MC_SIZE		(0x0c)
/* Empty multicast list. */
static mc_t   init_mc   = {
	0,
	CMD_SETMULTICAST,
	TDR_ADDR,
	0,
	{ { 0, } }
};

/* at 0x0022 */
#define SA_ADDR		(0x0022)
#define SA_SIZE		(0x0c)
/* Station address; sa_addr is filled in from dev->dev_addr before the
 * structure is written to the card. */
static sa_t   init_sa   = {
	0,
	CMD_SETADDRESS,
	MC_ADDR,
	{ 0, }
};

/* at 0x0010 */
#define CFG_ADDR	(0x0010)
#define CFG_SIZE	(0x12)
/* 82586 configure command: FIFO limits, address length, retry/slot
 * parameters. */
static cfg_t  init_cfg  = {
	0,
	CMD_CONFIG,
	SA_ADDR,
	8,
	8,
	CFG8_SRDY,
	CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6),
	0,
	0x60,
	0,
	CFG13_RETRY(15) | CFG13_SLOTH(2),
	0,
};

/* at 0x0000 */
#define SCB_ADDR	(0x0000)
#define SCB_SIZE	(0x10)
/* System control block: acks all interrupt sources, points the command
 * list at the config command and the receive area at its start. */
static scb_t  init_scb  = {
	0,
	SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX,
	CFG_ADDR,
	RX_AREA_START,
	0,
	0,
	0,
	0
};

/* at 0xffee */
#define ISCP_ADDR	(0xffee)
#define ISCP_SIZE	(0x08)
/* Intermediate SCP; busy flag is cleared by the chip once it has read
 * the scb pointer. */
static iscp_t init_iscp = {
	1,
	SCB_ADDR,
	0x0000,
	0x0000
};

/* at 0xfff6 */
#define SCP_ADDR	(0xfff6)
#define SCP_SIZE	(0x0a)
/* System configuration pointer: 16-bit bus, points at the iscp. */
static scp_t  init_scp  = {
	SCP_SY_16BBUS,
	{ 0, 0 },
	ISCP_ADDR,
	0
};

#define RFD_SIZE	(0x16)
/* Receive frame descriptor template; links are filled in per-slot. */
static rfd_t  init_rfd	= {
	0,
	0,
	0,
	0,
	{ 0, },
	{ 0, },
	0
};

#define RBD_SIZE	(0x0a)
/* Receive buffer descriptor template; buffer sized for a full frame. */
static rbd_t  init_rbd	= {
	0,
	0,
	0,
	0,
	ETH_FRAME_LEN + 8
};

#define TX_SIZE		(0x08)
#define TBD_SIZE	(0x08)
445
/*
 * Full chip initialisation, performed at open and after a fatal error:
 * load the init structures into card RAM, build the circular receive
 * ring, release reset, then wait (polling, bounded by jiffies
 * timeouts) for each queued command to complete and report any that
 * fail.  Returns 0 on success, 1 on failure (with the chip left in
 * reset).
 */
static int
ether1_init_for_open (struct net_device *dev)
{
	int i, status, addr, next, next2;
	int failures = 0;
	unsigned long timeout;

	/* Hold the 82586 in reset while the RAM is set up. */
	writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);

	for (i = 0; i < 6; i++)
		init_sa.sa_addr[i] = dev->dev_addr[i];

	/* load data structures into ether1 RAM */
	ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE);
	ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
	ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE);
	ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE);
	ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE);
	ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE);
	ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE);
	ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE);

	/* Read back one field as a sanity check on the RAM and on the
	 * structure packing assumed by the ether1_readw() macro. */
	if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
		printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
			dev->name);
		return 1;
	}

	/*
	 * setup circularly linked list of { rfd, rbd, buffer }, with
	 * all rfds circularly linked, rbds circularly linked.
	 * First rfd is linked to scp, first rbd is linked to first
	 * rfd.  Last rbd has a suspend command.
	 */
	addr = RX_AREA_START;
	do {
		next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
		next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;

		if (next2 >= RX_AREA_END) {
			/* Last slot: close the ring and mark it as the
			 * end-of-list/suspend point. */
			next = RX_AREA_START;
			init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
			priv(dev)->rx_tail = addr;
		} else
			init_rfd.rfd_command = 0;
		if (addr == RX_AREA_START)
			init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
		else
			init_rfd.rfd_rbdoffset = 0;
		init_rfd.rfd_link = next;
		init_rbd.rbd_link = next + RFD_SIZE;
		init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;

		ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
		ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
		addr = next;
	} while (next2 < RX_AREA_END);

	/* Initial software ring pointers (see struct ether1_priv). */
	priv(dev)->tx_link = NOP_ADDR;
	priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
	priv(dev)->tx_tail = TDR_ADDR;
	priv(dev)->rx_head = RX_AREA_START;

	/* release reset & give 586 a prod */
	priv(dev)->resetting = 1;
	priv(dev)->initialising = 1;
	writeb(CTRL_RST, REG_CONTROL);
	writeb(0, REG_CONTROL);
	writeb(CTRL_CA, REG_CONTROL);

	/* 586 should now unset iscp.busy */
	timeout = jiffies + HZ/2;
	while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
		if (time_after(jiffies, timeout)) {
			printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name);
			return 1;
		}
	}

	/* check status of commands that we issued */
	timeout += HZ/10;
	while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	/* TDR cable test result is informational only - it does not count
	 * as an initialisation failure. */
	timeout += HZ;
	while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
	} else {
		status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
		if (status & TDR_XCVRPROB)
			printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name);
		else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
#ifdef FANCY
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name,
				status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10,
				(status & TDR_TIME) % 10);
#else
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name,
				status & TDR_SHORT ? "short" : "open", (status & TDR_TIME));
#endif
		}
	}

	if (failures)
		ether1_reset (dev);
	return failures ? 1 : 0;
}
611
612/* ------------------------------------------------------------------------- */
613
/*
 * Allocate <size> bytes (rounded up to an even length) from the
 * circular transmit area [TX_AREA_START, TX_AREA_END).  tx_head is the
 * next free offset and tx_tail the oldest still-in-use command.
 * Returns the start offset of the allocation, or -1 if the request
 * would overtake the tail; on success tx_head is advanced.
 */
static int
ether1_txalloc (struct net_device *dev, int size)
{
	int start, tail;

	size = (size + 1) & ~1;
	tail = priv(dev)->tx_tail;

	if (priv(dev)->tx_head + size > TX_AREA_END) {
		/* Does not fit before the end of the area: wrap, which is
		 * only legal if the tail has not wrapped behind us. */
		if (tail > priv(dev)->tx_head)
			return -1;
		start = TX_AREA_START;
		if (start + size > tail)
			return -1;
		priv(dev)->tx_head = start + size;
	} else {
		/* Would the allocation run into the tail? */
		if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail)
			return -1;
		start = priv(dev)->tx_head;
		priv(dev)->tx_head += size;
	}

	return start;
}
638
639static int
640ether1_open (struct net_device *dev)
641{
642 if (!is_valid_ether_addr(dev->dev_addr)) {
643 printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
644 dev->name);
645 return -EINVAL;
646 }
647
648 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
649 return -EAGAIN;
650
651 if (ether1_init_for_open (dev)) {
652 free_irq (dev->irq, dev);
653 return -EAGAIN;
654 }
655
656 netif_start_queue(dev);
657
658 return 0;
659}
660
/*
 * ndo_tx_timeout: the watchdog fired, so reset and reinitialise the
 * chip, count a TX error and restart the queue.
 */
static void
ether1_timeout(struct net_device *dev)
{
	printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
		dev->name);
	printk(KERN_WARNING "%s: resetting device\n", dev->name);

	ether1_reset (dev);

	if (ether1_init_for_open (dev))
		printk (KERN_ERR "%s: unable to restart interface\n", dev->name);

	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
676
/*
 * ndo_start_xmit: build a { TX command, TBD, frame data, nop } group in
 * the circular transmit area, then splice it into the chip's command
 * chain by repointing the previous nop's link at the new TX command.
 * The trailing self-linked nop keeps the chain terminated until the
 * next packet is queued.
 *
 * NOTE(review): the four ether1_txalloc() calls below can return -1
 * when the TX area is full, but the results are used unchecked as card
 * offsets.  The later "room for a full sized frame" probe stops the
 * queue before this normally happens, but the failure path looks
 * unhandled - confirm against the hardware behaviour before relying on
 * it.
 */
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		/* A previous error (see ether1_xmit_done) requested a full
		 * chip reinitialisation. */
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		/* skb_padto frees the skb itself on failure. */
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	/*
	 * insert packet followed by a nop
	 */
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	nop.nop_link = nopaddr;	/* self-linked: terminates the chain */

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	/* now reset the previous nop pointer */
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	/* handle transmit */

	/* check to see if we have room for a full sized ether frame */
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;	/* probe only - restore the head */
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
749
/*
 * Reap completed commands from the transmit chain, starting at tx_tail.
 * The chain alternates TX commands and nops (see ether1_sendpacket);
 * completed TX statuses are folded into dev->stats, and once the tail
 * has caught up the queue is woken if a full-sized frame now fits.
 * A malformed chain or an incomplete "completed" command sets
 * priv->restart so the next xmit reinitialises the chip.
 */
static void
ether1_xmit_done (struct net_device *dev)
{
	nop_t nop;
	int caddr, tst;

	caddr = priv(dev)->tx_tail;

again:
	ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);

	switch (nop.nop_command & CMD_MASK) {
	case CMD_TDR:
		/* special case: the probe-time TDR command completed;
		 * restart the command/receive units if the CBL is valid
		 * and move the tail to the initial nop. */
		if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
				!= (unsigned short)I82586_NULL) {
			ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
				      scb_command, NORMALIRQS);
			writeb(CTRL_CA, REG_CONTROL);
		}
		priv(dev)->tx_tail = NOP_ADDR;
		return;

	case CMD_NOP:
		if (nop.nop_link == caddr) {
			/* Self-linked nop: end of chain, nothing queued. */
			if (priv(dev)->initialising == 0)
				printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
			else
				priv(dev)->initialising = 0;
			return;
		}
		if (caddr == nop.nop_link)
			return;
		caddr = nop.nop_link;
		goto again;

	case CMD_TX:
		if (nop.nop_status & STAT_COMPLETE)
			break;
		printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
		priv(dev)->restart = 1;
		return;

	default:
		printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)", dev->name,
			nop.nop_command & CMD_MASK, caddr);
		priv(dev)->restart = 1;
		return;
	}

	/* Walk completed TX/nop pairs, accumulating statistics. */
	while (nop.nop_status & STAT_COMPLETE) {
		if (nop.nop_status & STAT_OK) {
			dev->stats.tx_packets++;
			dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
		} else {
			dev->stats.tx_errors++;

			if (nop.nop_status & STAT_COLLAFTERTX)
				dev->stats.collisions++;
			if (nop.nop_status & STAT_NOCARRIER)
				dev->stats.tx_carrier_errors++;
			if (nop.nop_status & STAT_TXLOSTCTS)
				printk (KERN_WARNING "%s: cts lost\n", dev->name);
			if (nop.nop_status & STAT_TXSLOWDMA)
				dev->stats.tx_fifo_errors++;
			if (nop.nop_status & STAT_COLLEXCESSIVE)
				dev->stats.collisions += 16;
		}

		if (nop.nop_link == caddr) {
			printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
			break;
		}

		/* TX command must be followed by a nop ... */
		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
			printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
			break;
		}

		if (caddr == nop.nop_link)
			break;	/* self-linked nop: end of chain */

		/* ... and the nop by the next TX command. */
		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_TX) {
			printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
			break;
		}
	}
	priv(dev)->tx_tail = caddr;

	/* Probe whether a full-sized frame now fits; restore the head
	 * afterwards since this is only a space check. */
	caddr = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = caddr;
	if (tst != -1)
		netif_wake_queue(dev);
}
849
/*
 * Drain completed receive frame descriptors starting at rx_head.  Each
 * good frame is copied out of card RAM into a fresh skb and passed up
 * the stack; the consumed RFD is then recycled: it becomes the new
 * end-of-list (EL/suspend) marker and the previous tail is cleared for
 * reuse, keeping the ring circular.
 */
static void
ether1_recv_done (struct net_device *dev)
{
	int status;
	int nexttail, rbdaddr;
	rbd_t rbd;

	do {
		status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
		if ((status & RFD_COMPLETE) == 0)
			break;	/* chip has not finished this descriptor */

		rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
		ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);

		if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
			int length = rbd.rbd_status & RBD_ACNT;
			struct sk_buff *skb;

			length = (length + 1) & ~1;	/* round to even for the word copy */
			skb = dev_alloc_skb (length + 2);

			if (skb) {
				skb_reserve (skb, 2);	/* align IP header */

				ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);

				skb->protocol = eth_type_trans (skb, dev);
				netif_rx (skb);
				dev->stats.rx_packets++;
			} else
				dev->stats.rx_dropped++;
		} else {
			printk(KERN_WARNING "%s: %s\n", dev->name,
				(rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
			dev->stats.rx_dropped++;
		}

		nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
		/* nexttail should be rx_head */
		if (nexttail != priv(dev)->rx_head)
			printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
				dev->name, nexttail, priv(dev)->rx_head);
		/* Make the just-consumed RFD the new end-of-list marker and
		 * clear the old tail for reuse. */
		ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);

		priv(dev)->rx_tail = nexttail;
		priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
	} while (1);
}
902
/*
 * Interrupt handler: read the SCB status, acknowledge the asserted
 * event bits back to the chip, then dispatch - CX (command complete)
 * to ether1_xmit_done(), FR (frame received) to ether1_recv_done(),
 * and resume/restart the command or receive unit when CNA/RNR report
 * it went not-ready.
 */
static irqreturn_t
ether1_interrupt (int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	int status;

	status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS);

	if (status) {
		/* Acknowledge exactly the events we saw. */
		ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX),
			      SCB_ADDR, scb_t, scb_command, NORMALIRQS);
		writeb(CTRL_CA | CTRL_ACK, REG_CONTROL);
		if (status & SCB_STCX) {
			ether1_xmit_done (dev);
		}
		if (status & SCB_STCNA) {
			/* CU went not ready; expected (once) during the
			 * post-reset init sequence, counted via resetting. */
			if (priv(dev)->resetting == 0)
				printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name);
			else
				priv(dev)->resetting += 1;
			if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
					!= (unsigned short)I82586_NULL) {
				ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
				writeb(CTRL_CA, REG_CONTROL);
			}
			if (priv(dev)->resetting == 2)
				priv(dev)->resetting = 0;
		}
		if (status & SCB_STFR) {
			ether1_recv_done (dev);
		}
		if (status & SCB_STRNR) {
			if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) {
				printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
				ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
				writeb(CTRL_CA, REG_CONTROL);
				dev->stats.rx_dropped++;	/* we suspended due to lack of buffer space */
			} else
				printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
					ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
			printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset,
						NORMALIRQS));
		}
	} else
		writeb(CTRL_ACK, REG_CONTROL);	/* spurious - just ack */

	return IRQ_HANDLED;
}
951
/*
 * ndo_stop: put the chip back into reset (which also stops it raising
 * interrupts), then release the IRQ.  Always returns 0.
 */
static int
ether1_close (struct net_device *dev)
{
	ether1_reset (dev);

	free_irq(dev->irq, dev);

	return 0;
}
961
962/*
963 * Set or clear the multicast filter for this adaptor.
964 * num_addrs == -1 Promiscuous mode, receive all packets.
965 * num_addrs == 0 Normal mode, clear multicast list.
966 * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
967 * best-effort filtering.
968 */
static void
ether1_setmulticastlist (struct net_device *dev)
{
	/* Intentionally empty: multicast filtering is not implemented;
	 * the probe-time CMD_SETMULTICAST loads an empty list. */
}
973
974/* ------------------------------------------------------------------------- */
975
976static void __devinit ether1_banner(void)
977{
978 static unsigned int version_printed = 0;
979
980 if (net_debug && version_printed++ == 0)
981 printk(KERN_INFO "%s", version);
982}
983
/* net_device operations; MTU and MAC address use the generic ethernet
 * helpers. */
static const struct net_device_ops ether1_netdev_ops = {
	.ndo_open		= ether1_open,
	.ndo_stop		= ether1_close,
	.ndo_start_xmit		= ether1_sendpacket,
	.ndo_set_multicast_list	= ether1_setmulticastlist,
	.ndo_tx_timeout		= ether1_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
994
/*
 * Expansion card probe: claim the card's resources, map its I/O space,
 * read the MAC address from the ID PROM (one byte every 4 bytes),
 * RAM-test the card and register the net_device.  Errors unwind via
 * the free/release labels.
 */
static int __devinit
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct net_device *dev;
	int i, ret = 0;

	ether1_banner();

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	dev = alloc_etherdev(sizeof(struct ether1_priv));
	if (!dev) {
		ret = -ENOMEM;
		goto release;
	}

	SET_NETDEV_DEV(dev, &ec->dev);

	dev->irq = ec->irq;
	priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!priv(dev)->base) {
		ret = -ENOMEM;
		goto free;
	}

	if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
		ret = -ENODEV;
		goto free;
	}

	/* MAC address: one byte per 4-byte stride in the ID PROM. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));

	if (ether1_init_2(dev)) {
		ret = -ENODEV;
		goto free;
	}

	dev->netdev_ops		= &ether1_netdev_ops;
	dev->watchdog_timeo	= 5 * HZ / 100;

	ret = register_netdev(dev);
	if (ret)
		goto free;

	printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
		dev->name, ec->slot_no, dev->dev_addr);

	ecard_set_drvdata(ec, dev);
	return 0;

 free:
	free_netdev(dev);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
1055
/* Expansion card removal: unregister and free the net_device, then
 * give the card's resources back. */
static void __devexit ether1_remove(struct expansion_card *ec)
{
	struct net_device *dev = ecard_get_drvdata(ec);

	ecard_set_drvdata(ec, NULL);

	unregister_netdev(dev);
	free_netdev(dev);
	ecard_release_resources(ec);
}
1066
/* Expansion cards this driver binds to; the 0xffff pair terminates
 * the table. */
static const struct ecard_id ether1_ids[] = {
	{ MANU_ACORN, PROD_ACORN_ETHER1 },
	{ 0xffff, 0xffff }
};

static struct ecard_driver ether1_driver = {
	.probe		= ether1_probe,
	.remove		= __devexit_p(ether1_remove),
	.id_table	= ether1_ids,
	.drv = {
		.name	= "ether1",
	},
};
1080
/* Module entry/exit: register/unregister with the expansion card bus. */
static int __init ether1_init(void)
{
	return ecard_register_driver(&ether1_driver);
}

static void __exit ether1_exit(void)
{
	ecard_remove_driver(&ether1_driver);
}

module_init(ether1_init);
module_exit(ether1_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
new file mode 100644
index 00000000000..3a5830ab3dc
--- /dev/null
+++ b/drivers/net/arm/ether1.h
@@ -0,0 +1,280 @@
1/*
2 * linux/drivers/acorn/net/ether1.h
3 *
4 * Copyright (C) 1996 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Network driver for Acorn Ether1 cards.
11 */
12
13#ifndef _LINUX_ether1_H
14#define _LINUX_ether1_H
15
16#ifdef __ETHER1_C
17/* use 0 for production, 1 for verification, >2 for debug */
18#ifndef NET_DEBUG
19#define NET_DEBUG 0
20#endif
21
22#define priv(dev) ((struct ether1_priv *)netdev_priv(dev))
23
24/* Page register */
25#define REG_PAGE (priv(dev)->base + 0x0000)
26
27/* Control register */
28#define REG_CONTROL (priv(dev)->base + 0x0004)
29#define CTRL_RST 0x01
30#define CTRL_LOOPBACK 0x02
31#define CTRL_CA 0x04
32#define CTRL_ACK 0x08
33
34#define ETHER1_RAM (priv(dev)->base + 0x2000)
35
36/* HW address */
37#define IDPROM_ADDRESS (priv(dev)->base + 0x0024)
38
/*
 * Per-card private state for the Ether1 driver, reached through the
 * priv(dev) helper above.
 */
struct ether1_priv {
	void __iomem *base;		/* card I/O space, from ecardm_iomap() */
	unsigned int tx_link;		/* NOTE(review): tx chain offsets — users not in this chunk */
	unsigned int tx_head;
	/* volatile: presumably updated from the interrupt path — confirm */
	volatile unsigned int tx_tail;
	volatile unsigned int rx_head;
	volatile unsigned int rx_tail;
	unsigned char bus_type;		/* non-zero value returned by ether1_reset() */
	unsigned char resetting;
	unsigned char initialising : 1;
	unsigned char restart : 1;
};
51
52#define I82586_NULL (-1)
53
54typedef struct { /* tdr */
55 unsigned short tdr_status;
56 unsigned short tdr_command;
57 unsigned short tdr_link;
58 unsigned short tdr_result;
59#define TDR_TIME (0x7ff)
60#define TDR_SHORT (1 << 12)
61#define TDR_OPEN (1 << 13)
62#define TDR_XCVRPROB (1 << 14)
63#define TDR_LNKOK (1 << 15)
64} tdr_t;
65
66typedef struct { /* transmit */
67 unsigned short tx_status;
68 unsigned short tx_command;
69 unsigned short tx_link;
70 unsigned short tx_tbdoffset;
71} tx_t;
72
73typedef struct { /* tbd */
74 unsigned short tbd_opts;
75#define TBD_CNT (0x3fff)
76#define TBD_EOL (1 << 15)
77 unsigned short tbd_link;
78 unsigned short tbd_bufl;
79 unsigned short tbd_bufh;
80} tbd_t;
81
82typedef struct { /* rfd */
83 unsigned short rfd_status;
84#define RFD_NOEOF (1 << 6)
85#define RFD_FRAMESHORT (1 << 7)
86#define RFD_DMAOVRN (1 << 8)
87#define RFD_NORESOURCES (1 << 9)
88#define RFD_ALIGNERROR (1 << 10)
89#define RFD_CRCERROR (1 << 11)
90#define RFD_OK (1 << 13)
91#define RFD_FDCONSUMED (1 << 14)
92#define RFD_COMPLETE (1 << 15)
93 unsigned short rfd_command;
94#define RFD_CMDSUSPEND (1 << 14)
95#define RFD_CMDEL (1 << 15)
96 unsigned short rfd_link;
97 unsigned short rfd_rbdoffset;
98 unsigned char rfd_dest[6];
99 unsigned char rfd_src[6];
100 unsigned short rfd_len;
101} rfd_t;
102
103typedef struct { /* rbd */
104 unsigned short rbd_status;
105#define RBD_ACNT (0x3fff)
106#define RBD_ACNTVALID (1 << 14)
107#define RBD_EOF (1 << 15)
108 unsigned short rbd_link;
109 unsigned short rbd_bufl;
110 unsigned short rbd_bufh;
111 unsigned short rbd_len;
112} rbd_t;
113
114typedef struct { /* nop */
115 unsigned short nop_status;
116 unsigned short nop_command;
117 unsigned short nop_link;
118} nop_t;
119
120typedef struct { /* set multicast */
121 unsigned short mc_status;
122 unsigned short mc_command;
123 unsigned short mc_link;
124 unsigned short mc_cnt;
125 unsigned char mc_addrs[1][6];
126} mc_t;
127
128typedef struct { /* set address */
129 unsigned short sa_status;
130 unsigned short sa_command;
131 unsigned short sa_link;
132 unsigned char sa_addr[6];
133} sa_t;
134
135typedef struct { /* config command */
136 unsigned short cfg_status;
137 unsigned short cfg_command;
138 unsigned short cfg_link;
139 unsigned char cfg_bytecnt; /* size foll data: 4 - 12 */
140 unsigned char cfg_fifolim; /* FIFO threshold */
141 unsigned char cfg_byte8;
142#define CFG8_SRDY (1 << 6)
143#define CFG8_SAVEBADF (1 << 7)
144 unsigned char cfg_byte9;
145#define CFG9_ADDRLEN(x) (x)
146#define CFG9_ADDRLENBUF (1 << 3)
147#define CFG9_PREAMB2 (0 << 4)
148#define CFG9_PREAMB4 (1 << 4)
149#define CFG9_PREAMB8 (2 << 4)
150#define CFG9_PREAMB16 (3 << 4)
151#define CFG9_ILOOPBACK (1 << 6)
152#define CFG9_ELOOPBACK (1 << 7)
153 unsigned char cfg_byte10;
154#define CFG10_LINPRI(x) (x)
155#define CFG10_ACR(x) (x << 4)
156#define CFG10_BOFMET (1 << 7)
157 unsigned char cfg_ifs;
158 unsigned char cfg_slotl;
159 unsigned char cfg_byte13;
160#define CFG13_SLOTH(x) (x)
161#define CFG13_RETRY(x) (x << 4)
162 unsigned char cfg_byte14;
163#define CFG14_PROMISC (1 << 0)
164#define CFG14_DISBRD (1 << 1)
165#define CFG14_MANCH (1 << 2)
166#define CFG14_TNCRS (1 << 3)
167#define CFG14_NOCRC (1 << 4)
168#define CFG14_CRC16 (1 << 5)
169#define CFG14_BTSTF (1 << 6)
170#define CFG14_FLGPAD (1 << 7)
171 unsigned char cfg_byte15;
172#define CFG15_CSTF(x) (x)
173#define CFG15_ICSS (1 << 3)
174#define CFG15_CDTF(x) (x << 4)
175#define CFG15_ICDS (1 << 7)
176 unsigned short cfg_minfrmlen;
177} cfg_t;
178
179typedef struct { /* scb */
180 unsigned short scb_status; /* status of 82586 */
181#define SCB_STRXMASK (7 << 4) /* Receive unit status */
182#define SCB_STRXIDLE (0 << 4) /* Idle */
183#define SCB_STRXSUSP (1 << 4) /* Suspended */
184#define SCB_STRXNRES (2 << 4) /* No resources */
185#define SCB_STRXRDY (4 << 4) /* Ready */
186#define SCB_STCUMASK (7 << 8) /* Command unit status */
187#define SCB_STCUIDLE (0 << 8) /* Idle */
188#define SCB_STCUSUSP (1 << 8) /* Suspended */
189#define SCB_STCUACTV (2 << 8) /* Active */
190#define SCB_STRNR (1 << 12) /* Receive unit not ready */
191#define SCB_STCNA (1 << 13) /* Command unit not ready */
192#define SCB_STFR (1 << 14) /* Frame received */
193#define SCB_STCX (1 << 15) /* Command completed */
194 unsigned short scb_command; /* Next command */
195#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */
196#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */
197#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */
198#define SCB_CMDRXABORT (4 << 4) /* Abort reception */
199#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */
200#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */
201#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */
202#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */
203#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */
204#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */
205#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */
206#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */
207 unsigned short scb_cbl_offset; /* Offset of first command unit */
208 unsigned short scb_rfa_offset; /* Offset of first receive frame area */
209 unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/
210 unsigned short scb_aln_errors; /* Misaligned frames */
211 unsigned short scb_rsc_errors; /* Frames lost due to no space */
212 unsigned short scb_ovn_errors; /* Frames lost due to slow bus */
213} scb_t;
214
215typedef struct { /* iscp */
216 unsigned short iscp_busy; /* set by CPU before CA */
217 unsigned short iscp_offset; /* offset of SCB */
218 unsigned short iscp_basel; /* base of SCB */
219 unsigned short iscp_baseh;
220} iscp_t;
221
222 /* this address must be 0xfff6 */
223typedef struct { /* scp */
224 unsigned short scp_sysbus; /* bus size */
225#define SCP_SY_16BBUS 0x00
226#define SCP_SY_8BBUS 0x01
227 unsigned short scp_junk[2]; /* junk */
228 unsigned short scp_iscpl; /* lower 16 bits of iscp */
229 unsigned short scp_iscph; /* upper 16 bits of iscp */
230} scp_t;
231
232/* commands */
233#define CMD_NOP 0
234#define CMD_SETADDRESS 1
235#define CMD_CONFIG 2
236#define CMD_SETMULTICAST 3
237#define CMD_TX 4
238#define CMD_TDR 5
239#define CMD_DUMP 6
240#define CMD_DIAGNOSE 7
241
242#define CMD_MASK 7
243
244#define CMD_INTR (1 << 13)
245#define CMD_SUSP (1 << 14)
246#define CMD_EOL (1 << 15)
247
248#define STAT_COLLISIONS (15)
249#define STAT_COLLEXCESSIVE (1 << 5)
250#define STAT_COLLAFTERTX (1 << 6)
251#define STAT_TXDEFERRED (1 << 7)
252#define STAT_TXSLOWDMA (1 << 8)
253#define STAT_TXLOSTCTS (1 << 9)
254#define STAT_NOCARRIER (1 << 10)
255#define STAT_FAIL (1 << 11)
256#define STAT_ABORTED (1 << 12)
257#define STAT_OK (1 << 13)
258#define STAT_BUSY (1 << 14)
259#define STAT_COMPLETE (1 << 15)
260#endif
261#endif
262
263/*
264 * Ether1 card definitions:
265 *
266 * FAST accesses:
267 * +0 Page register
268 * 16 pages
269 * +4 Control
270 * '1' = reset
271 * '2' = loopback
272 * '4' = CA
273 * '8' = int ack
274 *
275 * RAM at address + 0x2000
276 * Pod. Prod id = 3
277 * Words after ID block [base + 8 words]
278 * +0 pcb issue (0x0c and 0xf3 invalid)
279 * +1 - +6 eth hw address
280 */
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
new file mode 100644
index 00000000000..44a8746f401
--- /dev/null
+++ b/drivers/net/arm/ether3.c
@@ -0,0 +1,918 @@
1/*
2 * linux/drivers/acorn/net/ether3.c
3 *
4 * Copyright (C) 1995-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card
11 * for Acorn machines
12 *
13 * By Russell King, with some suggestions from borris@ant.co.uk
14 *
15 * Changelog:
16 * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet
17 * address up to the higher levels - they're
18 * silently ignored. I/F can now be put into
19 * multicast mode. Receiver routine optimised.
20 * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of
21 * the kernel rather than when a module.
22 * 1.06 RMK 02/03/1996 Various code cleanups
23 * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit
24 * routines.
25 * 1.08 RMK 14/10/1996 Fixed problem with too many packets,
26 * prevented the kernel message about dropped
27 * packets appearing too many times a second.
28 * Now does not disable all IRQs, only the IRQ
29 * used by this card.
30 * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low,
31 * but we still service the TX queue if we get a
32 * RX interrupt.
33 * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004.
34 * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A.
35 * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1.
36 * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h.
37 * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets.
38 * Chip seems to have a bug in, whereby if the
39 * packet starts two bytes from the end of the
40 * buffer, it corrupts the receiver chain, and
41 * never updates the transmit status correctly.
42 * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing.
43 * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy
44 * hardware.
45 * 1.16 RMK 10/02/2000 Updated for 2.3.43
46 * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8
47 */
48
49#include <linux/module.h>
50#include <linux/kernel.h>
51#include <linux/types.h>
52#include <linux/fcntl.h>
53#include <linux/interrupt.h>
54#include <linux/ioport.h>
55#include <linux/in.h>
56#include <linux/slab.h>
57#include <linux/string.h>
58#include <linux/errno.h>
59#include <linux/netdevice.h>
60#include <linux/etherdevice.h>
61#include <linux/skbuff.h>
62#include <linux/device.h>
63#include <linux/init.h>
64#include <linux/delay.h>
65#include <linux/bitops.h>
66
67#include <asm/system.h>
68#include <asm/ecard.h>
69#include <asm/io.h>
70
71static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
72
73#include "ether3.h"
74
75static unsigned int net_debug = NET_DEBUG;
76
77static void ether3_setmulticastlist(struct net_device *dev);
78static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
79static void ether3_tx(struct net_device *dev);
80static int ether3_open (struct net_device *dev);
81static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
82static irqreturn_t ether3_interrupt (int irq, void *dev_id);
83static int ether3_close (struct net_device *dev);
84static void ether3_setmulticastlist (struct net_device *dev);
85static void ether3_timeout(struct net_device *dev);
86
87#define BUS_16 2
88#define BUS_8 1
89#define BUS_UNKNOWN 0
90
91/* --------------------------------------------------------------------------- */
92
93typedef enum {
94 buffer_write,
95 buffer_read
96} buffer_rw_t;
97
98/*
99 * ether3 read/write. Slow things down a bit...
100 * The SEEQ8005 doesn't like us writing to its registers
101 * too quickly.
102 */
/* Byte write to a card register, followed by a settling delay (the
 * SEEQ8005 misbehaves on back-to-back register accesses). */
static inline void ether3_outb(int v, const void __iomem *r)
{
	writeb(v, r);
	udelay(1);
}
108
/* Word write to a card register, followed by a settling delay (the
 * SEEQ8005 misbehaves on back-to-back register accesses). */
static inline void ether3_outw(int v, const void __iomem *r)
{
	writew(v, r);
	udelay(1);
}
114#define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; })
115#define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; })
116
/*
 * Position the chip's buffer window at offset 'start' in the card's
 * local packet RAM, for reading or writing through REG_BUFWIN.
 *
 * The FIFO must drain before the DMA address may be changed; if it
 * never empties, the hardware is wedged, so flag the device as broken
 * (checked in the transmit path) and give up.
 *
 * Returns 0 on success, 1 if the FIFO failed to empty.
 */
static int
ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
{
	int timeout = 1000;

	ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
	ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);

	/* wait (bounded) for the FIFO to drain */
	while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) {
		if (!timeout--) {
			printk("%s: setbuffer broken\n", dev->name);
			priv(dev)->broken = 1;
			return 1;
		}
		udelay(1);
	}

	if (read == buffer_read) {
		ether3_setbuffer_read_note: ;
		ether3_outw(start, REG_DMAADDR);
		ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND);
	} else {
		/* note: for writes, direction is set *before* the address */
		ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
		ether3_outw(start, REG_DMAADDR);
	}
	return 0;
}
143
144/*
145 * write data to the buffer memory
146 */
147#define ether3_writebuffer(dev,data,length) \
148 writesw(REG_BUFWIN, (data), (length) >> 1)
149
150#define ether3_writeword(dev,data) \
151 writew((data), REG_BUFWIN)
152
153#define ether3_writelong(dev,data) { \
154 void __iomem *reg_bufwin = REG_BUFWIN; \
155 writew((data), reg_bufwin); \
156 writew((data) >> 16, reg_bufwin); \
157}
158
159/*
160 * read data from the buffer memory
161 */
162#define ether3_readbuffer(dev,data,length) \
163 readsw(REG_BUFWIN, (data), (length) >> 1)
164
165#define ether3_readword(dev) \
166 readw(REG_BUFWIN)
167
168#define ether3_readlong(dev) \
169 readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16)
170
171/*
172 * Switch LED off...
173 */
/*
 * Timer callback: switch the activity LED off again by setting
 * CFG2_CTRLO (armed by ether3_ledon() below).
 */
static void ether3_ledoff(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
}
179
180/*
181 * switch LED on...
182 */
/*
 * Switch the activity LED on (clear CFG2_CTRLO) and (re)arm the timer
 * that switches it off again 1/50th of a second later.
 */
static inline void ether3_ledon(struct net_device *dev)
{
	del_timer(&priv(dev)->timer);
	priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
	priv(dev)->timer.data = (unsigned long)dev;
	priv(dev)->timer.function = ether3_ledoff;
	add_timer(&priv(dev)->timer);
	/* only touch the register if the LED is currently off */
	if (priv(dev)->regs.config2 & CFG2_CTRLO)
		ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2);
}
193
194/*
195 * Read the ethernet address string from the on board rom.
196 * This is an ascii string!!!
197 */
/*
 * Parse the six colon-separated hex octets between the parentheses of
 * the ROM chunk 0xf5 text (e.g. "... (xx:xx:xx:xx:xx:xx)") into addr[].
 * Returns 0 on success, -ENODEV if the chunk is missing or malformed.
 */
static int __devinit
ether3_addr(char *addr, struct expansion_card *ec)
{
	struct in_chunk_dir cd;
	char *s;

	if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) {
		int i;
		for (i = 0; i<6; i++) {
			addr[i] = simple_strtoul(s + 1, &s, 0x10);
			/* octets are ':'-separated; the last is closed by ')' */
			if (*s != (i==5?')' : ':' ))
				break;
		}
		if (i == 6)
			return 0;
	}
	/* I wonder if we should even let the user continue in this case
	 * - no, it would be better to disable the device
	 */
	printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n");
	return -ENODEV;
}
220
221/* --------------------------------------------------------------------------- */
222
223static int __devinit
224ether3_ramtest(struct net_device *dev, unsigned char byte)
225{
226 unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
227 int i,ret = 0;
228 int max_errors = 4;
229 int bad = -1;
230
231 if (!buffer)
232 return 1;
233
234 memset(buffer, byte, RX_END);
235 ether3_setbuffer(dev, buffer_write, 0);
236 ether3_writebuffer(dev, buffer, TX_END);
237 ether3_setbuffer(dev, buffer_write, RX_START);
238 ether3_writebuffer(dev, buffer + RX_START, RX_LEN);
239 memset(buffer, byte ^ 0xff, RX_END);
240 ether3_setbuffer(dev, buffer_read, 0);
241 ether3_readbuffer(dev, buffer, TX_END);
242 ether3_setbuffer(dev, buffer_read, RX_START);
243 ether3_readbuffer(dev, buffer + RX_START, RX_LEN);
244
245 for (i = 0; i < RX_END; i++) {
246 if (buffer[i] != byte) {
247 if (max_errors > 0 && bad != buffer[i]) {
248 printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X",
249 dev->name, buffer[i], byte, i);
250 ret = 2;
251 max_errors--;
252 bad = i;
253 }
254 } else {
255 if (bad != -1) {
256 if (bad != i - 1)
257 printk(" - 0x%04X\n", i - 1);
258 printk("\n");
259 bad = -1;
260 }
261 }
262 }
263 if (bad != -1)
264 printk(" - 0xffff\n");
265 kfree(buffer);
266
267 return ret;
268}
269
270/* ------------------------------------------------------------------------------- */
271
/*
 * One-time chip setup at probe time: choose the receive mode, program
 * the station address and buffer-layout registers, RAM-test the packet
 * buffer with two complementary patterns, and write a zeroed header at
 * the start of the TX area.  Returns non-zero if the RAM test fails.
 */
static int __devinit ether3_init_2(struct net_device *dev)
{
	int i;

	priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8;
	priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC;
	priv(dev)->regs.command = 0;

	/*
	 * Set up our hardware address
	 */
	ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
	for (i = 0; i < 6; i++)
		ether3_outb(dev->dev_addr[i], REG_BUFWIN);

	if (dev->flags & IFF_PROMISC)
		priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
	else if (dev->flags & IFF_MULTICAST)
		priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
	else
		priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;

	/*
	 * There is a problem with the NQ8005 in that it occasionally loses the
	 * last two bytes. To get round this problem, we receive the CRC as
	 * well. That way, if we do lose the last two, then it doesn't matter.
	 */
	ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
	ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
	ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
	ether3_outw(0, REG_TRANSMITPTR);
	ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
	ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
	ether3_outw(priv(dev)->regs.command, REG_COMMAND);

	/* two complementary patterns catch stuck-at faults both ways */
	i = ether3_ramtest(dev, 0x5A);
	if(i)
		return i;
	i = ether3_ramtest(dev, 0x1E);
	if(i)
		return i;

	/* zeroed header at offset 0 marks the TX chain as empty */
	ether3_setbuffer(dev, buffer_write, 0);
	ether3_writelong(dev, 0);
	return 0;
}
319
/*
 * Per-open (re)initialisation: hard-reset the chip, stop RX/TX, reload
 * the station address, reset the software ring state, reprogram the
 * buffer-layout registers, then enable the receiver with RX and TX
 * interrupts.  Mirrors ether3_init_2() but runs at every open.
 */
static void
ether3_init_for_open(struct net_device *dev)
{
	int i;

	/* Reset the chip */
	ether3_outw(CFG2_RESET, REG_CONFIG2);
	udelay(4);

	priv(dev)->regs.command = 0;
	ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
	/* busy-wait until both units report idle */
	while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
		barrier();

	ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
	for (i = 0; i < 6; i++)
		ether3_outb(dev->dev_addr[i], REG_BUFWIN);

	priv(dev)->tx_head = 0;
	priv(dev)->tx_tail = 0;
	priv(dev)->regs.config2 |= CFG2_CTRLO;	/* LED off */
	priv(dev)->rx_head = RX_START;

	ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
	ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
	ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
	ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
	ether3_outw(0, REG_TRANSMITPTR);
	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
	ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);

	/* empty TX chain marker, as in ether3_init_2() */
	ether3_setbuffer(dev, buffer_write, 0);
	ether3_writelong(dev, 0);

	priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX;
	ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
357
/*
 * Bus-width probe, 8-bit flavour: write a 16-bit test value as two byte
 * accesses to the low/high halves of the receive pointer register and
 * check whether both halves read back, indicating 8-bit wiring.
 */
static inline int
ether3_probe_bus_8(struct net_device *dev, int val)
{
	int write_low, write_high, read_low, read_high;

	write_low = val & 255;
	write_high = val >> 8;

	printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low);

	ether3_outb(write_low, REG_RECVPTR);
	ether3_outb(write_high, REG_RECVPTR + 4);

	read_low = ether3_inb(REG_RECVPTR);
	read_high = ether3_inb(REG_RECVPTR + 4);

	printk(", read8 [%02X:%02X]\n", read_high, read_low);

	return read_low == write_low && read_high == write_high;
}
378
379static inline int
380ether3_probe_bus_16(struct net_device *dev, int val)
381{
382 int read_val;
383
384 ether3_outw(val, REG_RECVPTR);
385 read_val = ether3_inw(REG_RECVPTR);
386
387 printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val);
388
389 return read_val == val;
390}
391
392/*
393 * Open/initialize the board. This is called (in the current kernel)
394 * sometime after booting when the 'ifconfig' program is run.
395 *
396 * This routine should set everything up anew at each open, even
397 * registers that "should" only need to be set once at boot, so that
398 * there is non-reboot way to recover if something goes wrong.
399 */
static int
ether3_open(struct net_device *dev)
{
	/* refuse to come up without a usable MAC (the ROM read at probe
	 * time may have failed) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
			dev->name);
		return -EINVAL;
	}

	/* plain (non-shared) IRQ request */
	if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
		return -EAGAIN;

	ether3_init_for_open(dev);

	netif_start_queue(dev);

	return 0;
}
418
419/*
420 * The inverse routine to ether3_open().
421 */
static int
ether3_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	disable_irq(dev->irq);

	/* stop receiver and transmitter, then wait for them to go idle */
	ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
	priv(dev)->regs.command = 0;
	while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
		barrier();
	/* 0x80 to CONFIG2+4 resets the chip (same write as in the probe
	 * path's "Reset card" step) */
	ether3_outb(0x80, REG_CONFIG2 + 4);
	ether3_outw(0, REG_COMMAND);

	free_irq(dev->irq, dev);

	return 0;
}
440
441/*
442 * Set or clear promiscuous/multicast mode filter for this adaptor.
443 *
444 * We don't attempt any packet filtering. The card may have a SEEQ 8004
445 * in which does not have the other ethernet address registers present...
446 */
447static void ether3_setmulticastlist(struct net_device *dev)
448{
449 priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC;
450
451 if (dev->flags & IFF_PROMISC) {
452 /* promiscuous mode */
453 priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
454 } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
455 priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
456 } else
457 priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
458
459 ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
460}
461
/*
 * Watchdog: the transmitter appears stuck.  Dump chip and ring state
 * for diagnosis, then abandon all queued packets (tx_head = tx_tail = 0)
 * and restart the queue, rather than attempting a full chip reset.
 */
static void ether3_timeout(struct net_device *dev)
{
	unsigned long flags;

	del_timer(&priv(dev)->timer);

	/* IRQs off while poking the buffer window for the state dump */
	local_irq_save(flags);
	printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
	printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name,
		ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2));
	printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name,
		ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR));
	printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name,
		priv(dev)->tx_head, priv(dev)->tx_tail);
	ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail);
	printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev));
	local_irq_restore(flags);

	priv(dev)->regs.config2 |= CFG2_CTRLO;
	dev->stats.tx_errors += 1;
	ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
	priv(dev)->tx_head = priv(dev)->tx_tail = 0;

	netif_wake_queue(dev);
}
487
488/*
489 * Transmit a packet
490 */
/*
 * Queue a packet for transmission.
 *
 * The TX area holds 16 slots of 0x600 bytes each.  A zeroed header is
 * first written into the *next* slot (so the chip's chain ends there),
 * the data is written, and only then is this slot's header completed —
 * the chip never sees a half-built entry.  Interrupts are disabled
 * around the buffer-window accesses because the interrupt handler also
 * repositions the window.
 */
static int
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned int ptr, next_ptr;

	/* hardware wedged (see ether3_setbuffer): silently drop */
	if (priv(dev)->broken) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		netif_start_queue(dev);
		return NETDEV_TX_OK;
	}

	/* pad to an even length — the buffer is written word-at-a-time */
	length = (length + 1) & ~1;
	if (length != skb->len) {
		if (skb_padto(skb, length))
			goto out;
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;

	local_irq_save(flags);

	/* ring full? */
	if (priv(dev)->tx_tail == next_ptr) {
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;	/* unable to queue */
	}

	ptr = 0x600 * priv(dev)->tx_head;
	priv(dev)->tx_head = next_ptr;
	next_ptr *= 0x600;

#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)

	/* terminate the chain in the following slot first */
	ether3_setbuffer(dev, buffer_write, next_ptr);
	ether3_writelong(dev, 0);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writelong(dev, 0);
	ether3_writebuffer(dev, skb->data, length);
	ether3_writeword(dev, htons(next_ptr));
	ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
	/* now go back and arm this slot's header */
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writeword(dev, htons((ptr + length + 4)));
	ether3_writeword(dev, TXHDR_FLAGS >> 16);
	ether3_ledon(dev);

	/* kick the transmitter if it is not already running */
	if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
		ether3_outw(ptr, REG_TRANSMITPTR);
		ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;
	local_irq_restore(flags);

	dev_kfree_skb(skb);

	/* stop the queue when the ring becomes full */
	if (priv(dev)->tx_tail == next_ptr)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
554
/*
 * Interrupt handler: acknowledge and service RX and/or TX completion.
 * RX work is bounded to 12 frames per invocation.
 */
static irqreturn_t
ether3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	unsigned int status, handled = IRQ_NONE;

#if NET_DEBUG > 1
	if(net_debug & DEBUG_INT)
		printk("eth3irq: %d ", irq);
#endif

	status = ether3_inw(REG_STATUS);

	if (status & STAT_INTRX) {
		/* ack before servicing so a new RX event is not lost */
		ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND);
		ether3_rx(dev, 12);
		handled = IRQ_HANDLED;
	}

	if (status & STAT_INTTX) {
		ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND);
		ether3_tx(dev);
		handled = IRQ_HANDLED;
	}

#if NET_DEBUG > 1
	if(net_debug & DEBUG_INT)
		printk("done\n");
#endif
	return handled;
}
586
587/*
588 * If we have a good packet(s), get it/them out of the buffers.
589 */
/*
 * Drain up to 'maxcnt' received frames from the card's RX ring into the
 * network stack.  Frames whose source MAC is our own (hardware
 * loopback) are discarded without charging the budget.  Returns the
 * remaining budget.
 */
static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
{
	unsigned int next_ptr = priv(dev)->rx_head, received = 0;

	ether3_ledon(dev);

	do {
		unsigned int this_ptr, status;
		unsigned char addrs[16];

		/*
		 * read the first 16 bytes from the buffer.
		 * This contains the status bytes etc and ethernet addresses,
		 * and we also check the source ethernet address to see if
		 * it originated from us.
		 */
		{
			unsigned int temp_ptr;
			ether3_setbuffer(dev, buffer_read, next_ptr);
			temp_ptr = ether3_readword(dev);
			status = ether3_readword(dev);
			/* stop at the first not-yet-complete descriptor */
			if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) !=
				(RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr)
				break;

			this_ptr = next_ptr + 4;
			next_ptr = ntohs(temp_ptr);
		}
		/* addrs+2: keeps the 12 address bytes positioned so the
		 * 16/32-bit copies below line up — see the buf stores */
		ether3_setbuffer(dev, buffer_read, this_ptr);
		ether3_readbuffer(dev, addrs+2, 12);

/* sanity-check the chain pointer; a corrupt ring is dumped and the
 * scan abandoned at the old head */
if (next_ptr < RX_START || next_ptr >= RX_END) {
	int i;
	printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
	printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
	for (i = 2; i < 14; i++)
		printk("%02X ", addrs[i]);
	printk("\n");
	next_ptr = priv(dev)->rx_head;
	break;
}
		/*
		 * ignore our own packets...
		 */
		if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) &&
		    !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) {
			maxcnt ++; /* compensate for loopedback packet */
			ether3_outw(next_ptr >> 8, REG_RECVEND);
		} else
		if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) {
			unsigned int length = next_ptr - this_ptr;
			struct sk_buff *skb;

			/* frame wrapped around the ring */
			if (next_ptr <= this_ptr)
				length += RX_END - RX_START;

			skb = dev_alloc_skb(length + 2);
			if (skb) {
				unsigned char *buf;

				skb_reserve(skb, 2);	/* align IP header */
				buf = skb_put(skb, length);
				ether3_readbuffer(dev, buf + 12, length - 12);
				ether3_outw(next_ptr >> 8, REG_RECVEND);
				/* the 12 header bytes were consumed above;
				 * reconstruct them from addrs[] */
				*(unsigned short *)(buf + 0)	= *(unsigned short *)(addrs + 2);
				*(unsigned long *)(buf + 2)	= *(unsigned long *)(addrs + 4);
				*(unsigned long *)(buf + 6)	= *(unsigned long *)(addrs + 8);
				*(unsigned short *)(buf + 10)	= *(unsigned short *)(addrs + 12);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				received ++;
			} else
				goto dropping;
		} else {
			struct net_device_stats *stats = &dev->stats;
			ether3_outw(next_ptr >> 8, REG_RECVEND);
			if (status & RXSTAT_OVERSIZE)	  stats->rx_over_errors ++;
			if (status & RXSTAT_CRCERROR)	  stats->rx_crc_errors ++;
			if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++;
			if (status & RXSTAT_SHORTPACKET)  stats->rx_length_errors ++;
			stats->rx_errors++;
		}
	}
	while (-- maxcnt);

done:
	dev->stats.rx_packets += received;
	priv(dev)->rx_head = next_ptr;
	/*
	 * If rx went off line, then that means that the buffer may be full. We
	 * have dropped at least one packet.
	 */
	if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
		dev->stats.rx_dropped++;
		ether3_outw(next_ptr, REG_RECVPTR);
		ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
	}

	return maxcnt;

dropping:{
	static unsigned long last_warned;

	ether3_outw(next_ptr >> 8, REG_RECVEND);
	/*
	 * Don't print this message too many times...
	 */
	if (time_after(jiffies, last_warned + 10 * HZ)) {
		last_warned = jiffies;
		printk("%s: memory squeeze, dropping packet.\n", dev->name);
	}
	dev->stats.rx_dropped++;
	goto done;
	}
}
705
706/*
707 * Update stats for the transmitted packet(s)
708 */
/*
 * Reap completed transmit slots: walk the ring from tx_tail, update
 * statistics for each finished packet, and wake the queue if any slots
 * were freed.  Work is bounded to 14 slots per call.
 */
static void ether3_tx(struct net_device *dev)
{
	unsigned int tx_tail = priv(dev)->tx_tail;
	int max_work = 14;

	do {
		unsigned long status;

		/*
		 * Read the packet header
		 */
		ether3_setbuffer(dev, buffer_read, tx_tail * 0x600);
		status = ether3_readlong(dev);

		/*
		 * Check to see if this packet has been transmitted
		 */
		if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) !=
		    (TXSTAT_DONE | TXHDR_TRANSMIT))
			break;

		/*
		 * Update errors
		 */
		if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
			dev->stats.tx_packets++;
		else {
			dev->stats.tx_errors++;
			if (status & TXSTAT_16COLLISIONS)
				dev->stats.collisions += 16;
			if (status & TXSTAT_BABBLED)
				dev->stats.tx_fifo_errors++;
		}

		tx_tail = (tx_tail + 1) & 15;
	} while (--max_work);

	if (priv(dev)->tx_tail != tx_tail) {
		priv(dev)->tx_tail = tx_tail;
		netif_wake_queue(dev);
	}
}
751
752static void __devinit ether3_banner(void)
753{
754 static unsigned version_printed = 0;
755
756 if (net_debug && version_printed++ == 0)
757 printk(KERN_INFO "%s", version);
758}
759
760static const struct net_device_ops ether3_netdev_ops = {
761 .ndo_open = ether3_open,
762 .ndo_stop = ether3_close,
763 .ndo_start_xmit = ether3_sendpacket,
764 .ndo_set_multicast_list = ether3_setmulticastlist,
765 .ndo_tx_timeout = ether3_timeout,
766 .ndo_validate_addr = eth_validate_addr,
767 .ndo_change_mtu = eth_change_mtu,
768 .ndo_set_mac_address = eth_mac_addr,
769};
770
771static int __devinit
772ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
773{
774 const struct ether3_data *data = id->data;
775 struct net_device *dev;
776 int bus_type, ret;
777
778 ether3_banner();
779
780 ret = ecard_request_resources(ec);
781 if (ret)
782 goto out;
783
784 dev = alloc_etherdev(sizeof(struct dev_priv));
785 if (!dev) {
786 ret = -ENOMEM;
787 goto release;
788 }
789
790 SET_NETDEV_DEV(dev, &ec->dev);
791
792 priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
793 if (!priv(dev)->base) {
794 ret = -ENOMEM;
795 goto free;
796 }
797
798 ec->irqaddr = priv(dev)->base + data->base_offset;
799 ec->irqmask = 0xf0;
800
801 priv(dev)->seeq = priv(dev)->base + data->base_offset;
802 dev->irq = ec->irq;
803
804 ether3_addr(dev->dev_addr, ec);
805
806 init_timer(&priv(dev)->timer);
807
808 /* Reset card...
809 */
810 ether3_outb(0x80, REG_CONFIG2 + 4);
811 bus_type = BUS_UNKNOWN;
812 udelay(4);
813
814 /* Test using Receive Pointer (16-bit register) to find out
815 * how the ether3 is connected to the bus...
816 */
817 if (ether3_probe_bus_8(dev, 0x100) &&
818 ether3_probe_bus_8(dev, 0x201))
819 bus_type = BUS_8;
820
821 if (bus_type == BUS_UNKNOWN &&
822 ether3_probe_bus_16(dev, 0x101) &&
823 ether3_probe_bus_16(dev, 0x201))
824 bus_type = BUS_16;
825
826 switch (bus_type) {
827 case BUS_UNKNOWN:
828 printk(KERN_ERR "%s: unable to identify bus width\n", dev->name);
829 ret = -ENODEV;
830 goto free;
831
832 case BUS_8:
833 printk(KERN_ERR "%s: %s found, but is an unsupported "
834 "8-bit card\n", dev->name, data->name);
835 ret = -ENODEV;
836 goto free;
837
838 default:
839 break;
840 }
841
842 if (ether3_init_2(dev)) {
843 ret = -ENODEV;
844 goto free;
845 }
846
847 dev->netdev_ops = &ether3_netdev_ops;
848 dev->watchdog_timeo = 5 * HZ / 100;
849
850 ret = register_netdev(dev);
851 if (ret)
852 goto free;
853
854 printk("%s: %s in slot %d, %pM\n",
855 dev->name, data->name, ec->slot_no, dev->dev_addr);
856
857 ecard_set_drvdata(ec, dev);
858 return 0;
859
860 free:
861 free_netdev(dev);
862 release:
863 ecard_release_resources(ec);
864 out:
865 return ret;
866}
867
868static void __devexit ether3_remove(struct expansion_card *ec)
869{
870 struct net_device *dev = ecard_get_drvdata(ec);
871
872 ecard_set_drvdata(ec, NULL);
873
874 unregister_netdev(dev);
875 free_netdev(dev);
876 ecard_release_resources(ec);
877}
878
879static struct ether3_data ether3 = {
880 .name = "ether3",
881 .base_offset = 0,
882};
883
884static struct ether3_data etherb = {
885 .name = "etherb",
886 .base_offset = 0x800,
887};
888
889static const struct ecard_id ether3_ids[] = {
890 { MANU_ANT2, PROD_ANT_ETHER3, &ether3 },
891 { MANU_ANT, PROD_ANT_ETHER3, &ether3 },
892 { MANU_ANT, PROD_ANT_ETHERB, &etherb },
893 { 0xffff, 0xffff }
894};
895
896static struct ecard_driver ether3_driver = {
897 .probe = ether3_probe,
898 .remove = __devexit_p(ether3_remove),
899 .id_table = ether3_ids,
900 .drv = {
901 .name = "ether3",
902 },
903};
904
/* Module entry point: register the ether3 expansion-card driver. */
static int __init ether3_init(void)
{
	return ecard_register_driver(&ether3_driver);
}
909
/* Module exit point: unregister the ether3 expansion-card driver. */
static void __exit ether3_exit(void)
{
	ecard_remove_driver(&ether3_driver);
}
914
915module_init(ether3_init);
916module_exit(ether3_exit);
917
918MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
new file mode 100644
index 00000000000..2db63b08bdf
--- /dev/null
+++ b/drivers/net/arm/ether3.h
@@ -0,0 +1,176 @@
1/*
2 * linux/drivers/acorn/net/ether3.h
3 *
4 * Copyright (C) 1995-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * network driver for Acorn/ANT Ether3 cards
11 */
12
13#ifndef _LINUX_ether3_H
14#define _LINUX_ether3_H
15
16/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
17#define DEBUG_TX 2
18#define DEBUG_RX 4
19#define DEBUG_INT 8
20#define DEBUG_IC 16
21#ifndef NET_DEBUG
22#define NET_DEBUG 0
23#endif
24
25#define priv(dev) ((struct dev_priv *)netdev_priv(dev))
26
27/* Command register definitions & bits */
28#define REG_COMMAND (priv(dev)->seeq + 0x0000)
29#define CMD_ENINTDMA 0x0001
30#define CMD_ENINTRX 0x0002
31#define CMD_ENINTTX 0x0004
32#define CMD_ENINTBUFWIN 0x0008
33#define CMD_ACKINTDMA 0x0010
34#define CMD_ACKINTRX 0x0020
35#define CMD_ACKINTTX 0x0040
36#define CMD_ACKINTBUFWIN 0x0080
37#define CMD_DMAON 0x0100
38#define CMD_RXON 0x0200
39#define CMD_TXON 0x0400
40#define CMD_DMAOFF 0x0800
41#define CMD_RXOFF 0x1000
42#define CMD_TXOFF 0x2000
43#define CMD_FIFOREAD 0x4000
44#define CMD_FIFOWRITE 0x8000
45
46/* status register */
47#define REG_STATUS (priv(dev)->seeq + 0x0000)
48#define STAT_ENINTSTAT 0x0001
49#define STAT_ENINTRX 0x0002
50#define STAT_ENINTTX 0x0004
51#define STAT_ENINTBUFWIN 0x0008
52#define STAT_INTDMA 0x0010
53#define STAT_INTRX 0x0020
54#define STAT_INTTX 0x0040
55#define STAT_INTBUFWIN 0x0080
56#define STAT_DMAON 0x0100
57#define STAT_RXON 0x0200
58#define STAT_TXON 0x0400
59#define STAT_FIFOFULL 0x2000
60#define STAT_FIFOEMPTY 0x4000
61#define STAT_FIFODIR 0x8000
62
63/* configuration register 1 */
64#define REG_CONFIG1 (priv(dev)->seeq + 0x0040)
65#define CFG1_BUFSELSTAT0 0x0000
66#define CFG1_BUFSELSTAT1 0x0001
67#define CFG1_BUFSELSTAT2 0x0002
68#define CFG1_BUFSELSTAT3 0x0003
69#define CFG1_BUFSELSTAT4 0x0004
70#define CFG1_BUFSELSTAT5 0x0005
71#define CFG1_ADDRPROM 0x0006
72#define CFG1_TRANSEND 0x0007
73#define CFG1_LOCBUFMEM 0x0008
74#define CFG1_INTVECTOR 0x0009
75#define CFG1_RECVSPECONLY 0x0000
76#define CFG1_RECVSPECBROAD 0x4000
77#define CFG1_RECVSPECBRMULTI 0x8000
78#define CFG1_RECVPROMISC 0xC000
79
80/* The following aren't in 8004 */
81#define CFG1_DMABURSTCONT 0x0000
82#define CFG1_DMABURST800NS 0x0010
83#define CFG1_DMABURST1600NS 0x0020
84#define CFG1_DMABURST3200NS 0x0030
85#define CFG1_DMABURST1 0x0000
86#define CFG1_DMABURST4 0x0040
87#define CFG1_DMABURST8 0x0080
88#define CFG1_DMABURST16 0x00C0
89#define CFG1_RECVCOMPSTAT0 0x0100
90#define CFG1_RECVCOMPSTAT1 0x0200
91#define CFG1_RECVCOMPSTAT2 0x0400
92#define CFG1_RECVCOMPSTAT3 0x0800
93#define CFG1_RECVCOMPSTAT4 0x1000
94#define CFG1_RECVCOMPSTAT5 0x2000
95
96/* configuration register 2 */
97#define REG_CONFIG2 (priv(dev)->seeq + 0x0080)
98#define CFG2_BYTESWAP 0x0001
99#define CFG2_ERRENCRC 0x0008
100#define CFG2_ERRENDRIBBLE 0x0010
101#define CFG2_ERRSHORTFRAME 0x0020
102#define CFG2_SLOTSELECT 0x0040
103#define CFG2_PREAMSELECT 0x0080
104#define CFG2_ADDRLENGTH 0x0100
105#define CFG2_RECVCRC 0x0200
106#define CFG2_XMITNOCRC 0x0400
107#define CFG2_LOOPBACK 0x0800
108#define CFG2_CTRLO 0x1000
109#define CFG2_RESET 0x8000
110
111#define REG_RECVEND (priv(dev)->seeq + 0x00c0)
112
113#define REG_BUFWIN (priv(dev)->seeq + 0x0100)
114
115#define REG_RECVPTR (priv(dev)->seeq + 0x0140)
116
117#define REG_TRANSMITPTR (priv(dev)->seeq + 0x0180)
118
119#define REG_DMAADDR (priv(dev)->seeq + 0x01c0)
120
121/*
122 * Cards transmit/receive headers
123 */
124#define TX_NEXT (0xffff)
125#define TXHDR_ENBABBLEINT (1 << 16)
126#define TXHDR_ENCOLLISIONINT (1 << 17)
127#define TXHDR_EN16COLLISION (1 << 18)
128#define TXHDR_ENSUCCESS (1 << 19)
129#define TXHDR_DATAFOLLOWS (1 << 21)
130#define TXHDR_CHAINCONTINUE (1 << 22)
131#define TXHDR_TRANSMIT (1 << 23)
132#define TXSTAT_BABBLED (1 << 24)
133#define TXSTAT_COLLISION (1 << 25)
134#define TXSTAT_16COLLISIONS (1 << 26)
135#define TXSTAT_DONE (1 << 31)
136
137#define RX_NEXT (0xffff)
138#define RXHDR_CHAINCONTINUE (1 << 6)
139#define RXHDR_RECEIVE (1 << 7)
140#define RXSTAT_OVERSIZE (1 << 8)
141#define RXSTAT_CRCERROR (1 << 9)
142#define RXSTAT_DRIBBLEERROR (1 << 10)
143#define RXSTAT_SHORTPACKET (1 << 11)
144#define RXSTAT_DONE (1 << 15)
145
146
147#define TX_START 0x0000
148#define TX_END 0x6000
149#define RX_START 0x6000
150#define RX_LEN 0xA000
151#define RX_END 0x10000
152/* must be a power of 2 and greater than MAX_TX_BUFFERED */
153#define MAX_TXED 16
154#define MAX_TX_BUFFERED 10
155
156struct dev_priv {
157 void __iomem *base;
158 void __iomem *seeq;
159 struct {
160 unsigned int command;
161 unsigned int config1;
162 unsigned int config2;
163 } regs;
164 unsigned char tx_head; /* buffer nr to insert next packet */
165 unsigned char tx_tail; /* buffer nr of transmitting packet */
166 unsigned int rx_head; /* address to fetch next packet from */
167 struct timer_list timer;
168 int broken; /* 0 = ok, 1 = something went wrong */
169};
170
171struct ether3_data {
172 const char name[8];
173 unsigned long base_offset;
174};
175
176#endif
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
new file mode 100644
index 00000000000..03e217a868d
--- /dev/null
+++ b/drivers/net/arm/etherh.c
@@ -0,0 +1,866 @@
1/*
2 * linux/drivers/acorn/net/etherh.c
3 *
4 * Copyright (C) 2000-2002 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * NS8390 I-cubed EtherH and ANT EtherM specific driver
11 * Thanks to I-Cubed for information on their cards.
12 * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton
13 * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan)
14 * EtherM integration re-engineered by Russell King.
15 *
16 * Changelog:
17 * 08-12-1996 RMK 1.00 Created
18 * RMK 1.03 Added support for EtherLan500 cards
19 * 23-11-1997 RMK 1.04 Added media autodetection
20 * 16-04-1998 RMK 1.05 Improved media autodetection
21 * 10-02-2000 RMK 1.06 Updated for 2.3.43
22 * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8
23 * 12-10-1999 CK/TEW EtherM driver first release
24 * 21-12-2000 TTC EtherH/EtherM integration
25 * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver.
26 * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot.
27 */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/types.h>
32#include <linux/fcntl.h>
33#include <linux/interrupt.h>
34#include <linux/ioport.h>
35#include <linux/in.h>
36#include <linux/string.h>
37#include <linux/errno.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/skbuff.h>
42#include <linux/delay.h>
43#include <linux/device.h>
44#include <linux/init.h>
45#include <linux/bitops.h>
46#include <linux/jiffies.h>
47
48#include <asm/system.h>
49#include <asm/ecard.h>
50#include <asm/io.h>
51
52#define EI_SHIFT(x) (ei_local->reg_offset[x])
53
54#define ei_inb(_p) readb((void __iomem *)_p)
55#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p)
56#define ei_inb_p(_p) readb((void __iomem *)_p)
57#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
58
59#define NET_DEBUG 0
60#define DEBUG_INIT 2
61
62#define DRV_NAME "etherh"
63#define DRV_VERSION "1.11"
64
65static char version[] __initdata =
66 "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
67
68#include "../lib8390.c"
69
70static unsigned int net_debug = NET_DEBUG;
71
72struct etherh_priv {
73 void __iomem *ioc_fast;
74 void __iomem *memc;
75 void __iomem *dma_base;
76 unsigned int id;
77 void __iomem *ctrl_port;
78 unsigned char ctrl;
79 u32 supported;
80};
81
82struct etherh_data {
83 unsigned long ns8390_offset;
84 unsigned long dataport_offset;
85 unsigned long ctrlport_offset;
86 int ctrl_ioc;
87 const char name[16];
88 u32 supported;
89 unsigned char tx_start_page;
90 unsigned char stop_page;
91};
92
93MODULE_AUTHOR("Russell King");
94MODULE_DESCRIPTION("EtherH/EtherM driver");
95MODULE_LICENSE("GPL");
96
97#define ETHERH500_DATAPORT 0x800 /* MEMC */
98#define ETHERH500_NS8390 0x000 /* MEMC */
99#define ETHERH500_CTRLPORT 0x800 /* IOC */
100
101#define ETHERH600_DATAPORT 0x040 /* MEMC */
102#define ETHERH600_NS8390 0x800 /* MEMC */
103#define ETHERH600_CTRLPORT 0x200 /* MEMC */
104
105#define ETHERH_CP_IE 1
106#define ETHERH_CP_IF 2
107#define ETHERH_CP_HEARTBEAT 2
108
109#define ETHERH_TX_START_PAGE 1
110#define ETHERH_STOP_PAGE 127
111
112/*
113 * These came from CK/TEW
114 */
115#define ETHERM_DATAPORT 0x200 /* MEMC */
116#define ETHERM_NS8390 0x800 /* MEMC */
117#define ETHERM_CTRLPORT 0x23c /* MEMC */
118
119#define ETHERM_TX_START_PAGE 64
120#define ETHERM_STOP_PAGE 127
121
122/* ------------------------------------------------------------------------ */
123
124#define etherh_priv(dev) \
125 ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device)))
126
/*
 * Set bits in the (write-only) control port.  The last written value is
 * shadowed in eh->ctrl since the hardware register cannot be read back
 * to recover it.
 */
127static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask)
128{
129	unsigned char ctrl = eh->ctrl | mask;
130	eh->ctrl = ctrl;
131	writeb(ctrl, eh->ctrl_port);
132}
133
/* Clear bits in the control port; see etherh_set_ctrl for the shadow. */
134static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask)
135{
136	unsigned char ctrl = eh->ctrl & ~mask;
137	eh->ctrl = ctrl;
138	writeb(ctrl, eh->ctrl_port);
139}
140
/* Read the status byte that shares the control-port address. */
141static inline unsigned int etherh_get_stat(struct etherh_priv *eh)
142{
143	return readb(eh->ctrl_port);
144}
145
146
147
148
/* ecard irqenable hook: unmask the card interrupt via the control port. */
149static void etherh_irq_enable(ecard_t *ec, int irqnr)
150{
151	struct etherh_priv *eh = ec->irq_data;
152
153	etherh_set_ctrl(eh, ETHERH_CP_IE);
154}
155
/* ecard irqdisable hook: mask the card interrupt via the control port. */
156static void etherh_irq_disable(ecard_t *ec, int irqnr)
157{
158	struct etherh_priv *eh = ec->irq_data;
159
160	etherh_clr_ctrl(eh, ETHERH_CP_IE);
161}
162
163static expansioncard_ops_t etherh_ops = {
164 .irqenable = etherh_irq_enable,
165 .irqdisable = etherh_irq_disable,
166};
167
168
169
170
/*
 * Select the physical media (10base2 vs 10baseT) for the current
 * dev->if_port.  EtherLan600/600A steer media via spare bits of the
 * 8390's EN0_RCNTHI register; EtherLan500 uses the card control port.
 * Done with IRQs off so the read-modify-write of the register cannot
 * race the interrupt path.
 */
171static void
172etherh_setif(struct net_device *dev)
173{
	/* ei_local looks unused, but EN0_RCNTHI expands through the
	 * EI_SHIFT() macro which references ei_local->reg_offset. */
174	struct ei_device *ei_local = netdev_priv(dev);
175	unsigned long flags;
176	void __iomem *addr;
177
178	local_irq_save(flags);
179
180	/* set the interface type */
181	switch (etherh_priv(dev)->id) {
182	case PROD_I3_ETHERLAN600:
183	case PROD_I3_ETHERLAN600A:
184		addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
185
186		switch (dev->if_port) {
187		case IF_PORT_10BASE2:
188			writeb((readb(addr) & 0xf8) | 1, addr);
189			break;
190		case IF_PORT_10BASET:
191			writeb((readb(addr) & 0xf8), addr);
192			break;
193		}
194		break;
195
196	case PROD_I3_ETHERLAN500:
197		switch (dev->if_port) {
198		case IF_PORT_10BASE2:
199			etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF);
200			break;
201
202		case IF_PORT_10BASET:
203			etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF);
204			break;
205		}
206		break;
207
208	default:
209		break;
210	}
211
212	local_irq_restore(flags);
213}
214
/*
 * Return non-zero when the currently selected media has link/carrier.
 * 10base2 is always reported "up" (no detection); 10baseT is probed from
 * a status bit (RCNTHI bit 2 on EtherLan600/600A, the heartbeat bit of
 * the control port on EtherLan500).
 */
215static int
216etherh_getifstat(struct net_device *dev)
217{
	/* ei_local is consumed by the EI_SHIFT() macro inside EN0_RCNTHI. */
218	struct ei_device *ei_local = netdev_priv(dev);
219	void __iomem *addr;
220	int stat = 0;
221
222	switch (etherh_priv(dev)->id) {
223	case PROD_I3_ETHERLAN600:
224	case PROD_I3_ETHERLAN600A:
225		addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
226		switch (dev->if_port) {
227		case IF_PORT_10BASE2:
228			stat = 1;
229			break;
230		case IF_PORT_10BASET:
231			stat = readb(addr) & 4;
232			break;
233		}
234		break;
235
236	case PROD_I3_ETHERLAN500:
237		switch (dev->if_port) {
238		case IF_PORT_10BASE2:
239			stat = 1;
240			break;
241		case IF_PORT_10BASET:
242			stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT;
243			break;
244		}
245		break;
246
247	default:
248		stat = 0;
249		break;
250	}
251
252	return stat != 0;
253}
254
255/*
256 * Configure the interface. Note that we ignore the other
257 * parts of ifmap, since its mostly meaningless for this driver.
258 */
/*
 * Configure the interface. Note that we ignore the other
 * parts of ifmap, since its mostly meaningless for this driver.
 * An explicit media choice disables automedia; anything other than
 * TP/BNC is rejected with -EINVAL.
 */
259static int etherh_set_config(struct net_device *dev, struct ifmap *map)
260{
261	switch (map->port) {
262	case IF_PORT_10BASE2:
263	case IF_PORT_10BASET:
264		/*
265		 * If the user explicitly sets the interface
266		 * media type, turn off automedia detection.
267		 */
268		dev->flags &= ~IFF_AUTOMEDIA;
269		dev->if_port = map->port;
270		break;
271
272	default:
273		return -EINVAL;
274	}
275
	/* Push the new selection to the hardware immediately. */
276	etherh_setif(dev);
277
278	return 0;
279}
280
281/*
282 * Reset the 8390 (hard reset). Note that we can't actually do this.
283 */
/*
 * Reset the 8390 (hard reset). Note that we can't actually do this.
 * Instead we stop the NIC and, when automedia is active, flip to the
 * other media type; interface_num != 0 is the flag (set by the 8390
 * core on TX trouble) that a media change is wanted.
 */
284static void
285etherh_reset(struct net_device *dev)
286{
287	struct ei_device *ei_local = netdev_priv(dev);
288	void __iomem *addr = (void __iomem *)dev->base_addr;
289
290	writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
291
292	/*
293	 * See if we need to change the interface type.
294	 * Note that we use 'interface_num' as a flag
295	 * to indicate that we need to change the media.
296	 */
297	if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) {
298		ei_local->interface_num = 0;
299
300		if (dev->if_port == IF_PORT_10BASET)
301			dev->if_port = IF_PORT_10BASE2;
302		else
303			dev->if_port = IF_PORT_10BASET;
304
305		etherh_setif(dev);
306	}
307}
308
309/*
310 * Write a block of data out to the 8390
311 */
312static void
313etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page)
314{
315 struct ei_device *ei_local = netdev_priv(dev);
316 unsigned long dma_start;
317 void __iomem *dma_base, *addr;
318
319 if (ei_local->dmaing) {
320 printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
321 " DMAstat %d irqlock %d\n", dev->name,
322 ei_local->dmaing, ei_local->irqlock);
323 return;
324 }
325
326 /*
327 * Make sure we have a round number of bytes if we're in word mode.
328 */
329 if (count & 1 && ei_local->word16)
330 count++;
331
332 ei_local->dmaing = 1;
333
334 addr = (void __iomem *)dev->base_addr;
335 dma_base = etherh_priv(dev)->dma_base;
336
337 count = (count + 1) & ~1;
338 writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
339
340 writeb (0x42, addr + EN0_RCNTLO);
341 writeb (0x00, addr + EN0_RCNTHI);
342 writeb (0x42, addr + EN0_RSARLO);
343 writeb (0x00, addr + EN0_RSARHI);
344 writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
345
346 udelay (1);
347
348 writeb (ENISR_RDC, addr + EN0_ISR);
349 writeb (count, addr + EN0_RCNTLO);
350 writeb (count >> 8, addr + EN0_RCNTHI);
351 writeb (0, addr + EN0_RSARLO);
352 writeb (start_page, addr + EN0_RSARHI);
353 writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD);
354
355 if (ei_local->word16)
356 writesw (dma_base, buf, count >> 1);
357 else
358 writesb (dma_base, buf, count);
359
360 dma_start = jiffies;
361
362 while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
363 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
364 printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
365 dev->name);
366 etherh_reset (dev);
367 __NS8390_init (dev, 1);
368 break;
369 }
370
371 writeb (ENISR_RDC, addr + EN0_ISR);
372 ei_local->dmaing = 0;
373}
374
375/*
376 * Read a block of data from the 8390
377 */
/*
 * Read a block of data from the 8390 into skb->data via the card's DMA
 * data port: program count and ring_offset, issue remote-read, stream
 * the bytes (trailing odd byte read separately in 16-bit mode), then
 * ack RDC.  dmaing guards against overlapping remote-DMA operations.
 */
378static void
379etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
380{
381	struct ei_device *ei_local = netdev_priv(dev);
382	unsigned char *buf;
383	void __iomem *dma_base, *addr;
384
385	if (ei_local->dmaing) {
386		printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
387			" DMAstat %d irqlock %d\n", dev->name,
388			ei_local->dmaing, ei_local->irqlock);
389		return;
390	}
391
392	ei_local->dmaing = 1;
393
394	addr = (void __iomem *)dev->base_addr;
395	dma_base = etherh_priv(dev)->dma_base;
396
397	buf = skb->data;
398	writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
399	writeb (count, addr + EN0_RCNTLO);
400	writeb (count >> 8, addr + EN0_RCNTHI);
401	writeb (ring_offset, addr + EN0_RSARLO);
402	writeb (ring_offset >> 8, addr + EN0_RSARHI);
403	writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
404
405	if (ei_local->word16) {
406		readsw (dma_base, buf, count >> 1);
407		if (count & 1)
408			buf[count - 1] = readb (dma_base);
409	} else
410		readsb (dma_base, buf, count);
411
412	writeb (ENISR_RDC, addr + EN0_ISR);
413	ei_local->dmaing = 0;
}
415
416/*
417 * Read a header from the 8390
418 */
/*
 * Read a 4-byte e8390_pkt_hdr from the start of the given ring page via
 * remote DMA.  Same dmaing guard and remote-read programming as
 * etherh_block_input, with RSARLO fixed at 0 (header sits at page start).
 */
419static void
420etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
421{
422	struct ei_device *ei_local = netdev_priv(dev);
423	void __iomem *dma_base, *addr;
424
425	if (ei_local->dmaing) {
426		printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
427			" DMAstat %d irqlock %d\n", dev->name,
428			ei_local->dmaing, ei_local->irqlock);
429		return;
430	}
431
432	ei_local->dmaing = 1;
433
434	addr = (void __iomem *)dev->base_addr;
435	dma_base = etherh_priv(dev)->dma_base;
436
437	writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
438	writeb (sizeof (*hdr), addr + EN0_RCNTLO);
439	writeb (0, addr + EN0_RCNTHI);
440	writeb (0, addr + EN0_RSARLO);
441	writeb (ring_page, addr + EN0_RSARHI);
442	writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
443
444	if (ei_local->word16)
445		readsw (dma_base, hdr, sizeof (*hdr) >> 1);
446	else
447		readsb (dma_base, hdr, sizeof (*hdr));
448
449	writeb (ENISR_RDC, addr + EN0_ISR);
450	ei_local->dmaing = 0;
451}
452
453/*
454 * Open/initialize the board. This is called (in the current kernel)
455 * sometime after booting when the 'ifconfig' program is run.
456 *
457 * This routine should set everything up anew at each open, even
458 * registers that "should" only need to be set once at boot, so that
459 * there is non-reboot way to recover if something goes wrong.
460 */
/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is non-reboot way to recover if something goes wrong.
 *
 * Validates the MAC, grabs the IRQ, then performs manual automedia
 * detection (try 10baseT, fall back to 10base2 if no link) before
 * handing off to the generic 8390 open.
 */
461static int
462etherh_open(struct net_device *dev)
463{
464	struct ei_device *ei_local = netdev_priv(dev);
465
466	if (!is_valid_ether_addr(dev->dev_addr)) {
467		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
468			dev->name);
469		return -EINVAL;
470	}
471
472	if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
473		return -EAGAIN;
474
475	/*
476	 * Make sure that we aren't going to change the
477	 * media type on the next reset - we are about to
478	 * do automedia manually now.
479	 */
480	ei_local->interface_num = 0;
481
482	/*
483	 * If we are doing automedia detection, do it now.
484	 * This is more reliable than the 8390's detection.
485	 */
486	if (dev->flags & IFF_AUTOMEDIA) {
487		dev->if_port = IF_PORT_10BASET;
488		etherh_setif(dev);
489		mdelay(1);
490		if (!etherh_getifstat(dev)) {
491			dev->if_port = IF_PORT_10BASE2;
492			etherh_setif(dev);
493		}
494	} else
495		etherh_setif(dev);
496
497	etherh_reset(dev);
498	__ei_open(dev);
499
500	return 0;
501}
502
503/*
504 * The inverse routine to etherh_open().
505 */
/*
 * The inverse routine to etherh_open(): generic 8390 close, then
 * release the IRQ taken in open.
 */
506static int
507etherh_close(struct net_device *dev)
508{
509	__ei_close (dev);
510	free_irq (dev->irq, dev);
511	return 0;
512}
513
514/*
515 * Initialisation
516 */
517
/* Print the driver version banner once, and only when net_debug is set. */
518static void __init etherh_banner(void)
519{
520	static int version_printed;
521
522	if (net_debug && version_printed++ == 0)
523		printk(KERN_INFO "%s", version);
524}
525
526/*
527 * Read the ethernet address string from the on board rom.
528 * This is an ascii string...
529 */
/*
 * Read the ethernet address string from the on board rom.
 * This is an ascii string...
 * Expected form is "...(xx:xx:xx:xx:xx:xx)" in podule chunk 0xf5; each
 * hex pair is parsed into addr[].  Returns 0 on success, -ENODEV if the
 * chunk is missing or the string does not parse.
 */
530static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
531{
532	struct in_chunk_dir cd;
533	char *s;
534	
535	if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
536		printk(KERN_ERR "%s: unable to read podule description string\n",
537		       dev_name(&ec->dev));
538		goto no_addr;
539	}
540	
541	s = strchr(cd.d.string, '(');
542	if (s) {
543		int i;
544
545		for (i = 0; i < 6; i++) {
			/* Each octet must be followed by ':' except the
			 * last, which must be the closing ')'. */
546			addr[i] = simple_strtoul(s + 1, &s, 0x10);
547			if (*s != (i == 5? ')' : ':'))
548				break;
549		}
550
551		if (i == 6)
552			return 0;
553	}
554
555	printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
556	       dev_name(&ec->dev), cd.d.string);
557
558 no_addr:
559	return -ENODEV;
560}
561
562/*
563 * Create an ethernet address from the system serial number.
564 */
/*
 * Create an ethernet address from the system serial number.
 * Fails with -ENODEV when no serial number is programmed.
 */
565static int __init etherm_addr(char *addr)
566{
567	unsigned int serial;
568
569	if (system_serial_low == 0 && system_serial_high == 0)
570		return -ENODEV;
571
	/* NOTE(review): OR-ing low and high words together loses
	 * information; presumably a deliberate cheap mix, but an XOR or
	 * concatenation may have been intended - confirm against the
	 * addressing scheme used by deployed EtherM cards. */
572	serial = system_serial_low | system_serial_high;
573
	/* 00:00:a4 prefix, remaining three octets derived from serial. */
574	addr[0] = 0;
575	addr[1] = 0;
576	addr[2] = 0xa4;
577	addr[3] = 0x10 + (serial >> 24);
578	addr[4] = serial >> 16;
579	addr[5] = serial >> 8;
580	return 0;
581}
582
/* ethtool: report driver name, version and the parent device as bus info. */
583static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
584{
585	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
586	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
587	strlcpy(info->bus_info, dev_name(dev->dev.parent),
588		sizeof(info->bus_info));
589}
590
/*
 * ethtool get_settings: fixed 10Mbit half duplex; port and autoneg are
 * derived from dev->if_port and the IFF_AUTOMEDIA flag.
 */
591static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
592{
593	cmd->supported	= etherh_priv(dev)->supported;
594	ethtool_cmd_speed_set(cmd, SPEED_10);
595	cmd->duplex	= DUPLEX_HALF;
596	cmd->port	= dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
597	cmd->autoneg	= (dev->flags & IFF_AUTOMEDIA ?
598			   AUTONEG_ENABLE : AUTONEG_DISABLE);
599	return 0;
600}
601
/*
 * ethtool set_settings: autoneg-enable turns automedia on; autoneg-
 * disable selects TP or BNC explicitly (anything else is -EINVAL) and
 * turns automedia off.  The choice is applied via etherh_setif().
 */
602static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
603{
604	switch (cmd->autoneg) {
605	case AUTONEG_ENABLE:
606		dev->flags |= IFF_AUTOMEDIA;
607		break;
608
609	case AUTONEG_DISABLE:
610		switch (cmd->port) {
611		case PORT_TP:
612			dev->if_port = IF_PORT_10BASET;
613			break;
614
615		case PORT_BNC:
616			dev->if_port = IF_PORT_10BASE2;
617			break;
618
619		default:
620			return -EINVAL;
621		}
622		dev->flags &= ~IFF_AUTOMEDIA;
623		break;
624
625	default:
626		return -EINVAL;
627	}
628
629	etherh_setif(dev);
630
631	return 0;
632}
633
634static const struct ethtool_ops etherh_ethtool_ops = {
635 .get_settings = etherh_get_settings,
636 .set_settings = etherh_set_settings,
637 .get_drvinfo = etherh_get_drvinfo,
638};
639
640static const struct net_device_ops etherh_netdev_ops = {
641 .ndo_open = etherh_open,
642 .ndo_stop = etherh_close,
643 .ndo_set_config = etherh_set_config,
644 .ndo_start_xmit = __ei_start_xmit,
645 .ndo_tx_timeout = __ei_tx_timeout,
646 .ndo_get_stats = __ei_get_stats,
647 .ndo_set_multicast_list = __ei_set_multicast_list,
648 .ndo_validate_addr = eth_validate_addr,
649 .ndo_set_mac_address = eth_mac_addr,
650 .ndo_change_mtu = eth_change_mtu,
651#ifdef CONFIG_NET_POLL_CONTROLLER
652 .ndo_poll_controller = __ei_poll,
653#endif
654};
655
656static u32 etherh_regoffsets[16];
657static u32 etherm_regoffsets[16];
658
/*
 * Probe one EtherH/EtherM expansion card: claim resources, allocate the
 * 8390 net_device with etherh_priv appended, map the MEMC (and for
 * ctrl_ioc cards the IOC-fast) region, wire up the 8390 callbacks and
 * MAC address, then reset, init and register the device.  Error paths
 * unwind via free/release/out labels.  ecardm_iomap regions are managed
 * by the ecard layer and need no explicit unmap here.
 */
659static int __devinit
660etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
661{
662	const struct etherh_data *data = id->data;
663	struct ei_device *ei_local;
664	struct net_device *dev;
665	struct etherh_priv *eh;
666	int ret;
667
668	etherh_banner();
669
670	ret = ecard_request_resources(ec);
671	if (ret)
672		goto out;
673
674	dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
675	if (!dev) {
676		ret = -ENOMEM;
677		goto release;
678	}
679
680	SET_NETDEV_DEV(dev, &ec->dev);
681
682	dev->netdev_ops		= &etherh_netdev_ops;
683	dev->irq		= ec->irq;
684	dev->ethtool_ops	= &etherh_ethtool_ops;
685
	/* Derive media flags/port from the per-variant capability mask. */
686	if (data->supported & SUPPORTED_Autoneg)
687		dev->flags |= IFF_AUTOMEDIA;
688	if (data->supported & SUPPORTED_TP) {
689		dev->flags |= IFF_PORTSEL;
690		dev->if_port = IF_PORT_10BASET;
691	} else if (data->supported & SUPPORTED_BNC) {
692		dev->flags |= IFF_PORTSEL;
693		dev->if_port = IF_PORT_10BASE2;
694	} else
695		dev->if_port = IF_PORT_UNKNOWN;
696
697	eh = etherh_priv(dev);
698	eh->supported	= data->supported;
699	eh->ctrl	= 0;
700	eh->id		= ec->cid.product;
701	eh->memc	= ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
702	if (!eh->memc) {
703		ret = -ENOMEM;
704		goto free;
705	}
706
707	eh->ctrl_port = eh->memc;
708	if (data->ctrl_ioc) {
709		eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
710		if (!eh->ioc_fast) {
711			ret = -ENOMEM;
712			goto free;
713		}
714		eh->ctrl_port = eh->ioc_fast;
715	}
716
717	dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
718	eh->dma_base = eh->memc + data->dataport_offset;
719	eh->ctrl_port += data->ctrlport_offset;
720
721	/*
722	 * IRQ and control port handling - only for non-NIC slot cards.
723	 */
724	if (ec->slot_no != 8) {
725		ecard_setirq(ec, &etherh_ops, eh);
726	} else {
727		/*
728		 * If we're in the NIC slot, make sure the IRQ is enabled
729		 */
730		etherh_set_ctrl(eh, ETHERH_CP_IE);
731	}
732
733	ei_local = netdev_priv(dev);
734	spin_lock_init(&ei_local->page_lock);
735
	/* EtherM derives its MAC from the system serial; EtherH reads
	 * it from the podule ROM.  Register spacing also differs. */
736	if (ec->cid.product == PROD_ANT_ETHERM) {
737		etherm_addr(dev->dev_addr);
738		ei_local->reg_offset = etherm_regoffsets;
739	} else {
740		etherh_addr(dev->dev_addr, ec);
741		ei_local->reg_offset = etherh_regoffsets;
742	}
743
744	ei_local->name          = dev->name;
745	ei_local->word16        = 1;
746	ei_local->tx_start_page = data->tx_start_page;
747	ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES;
748	ei_local->stop_page     = data->stop_page;
749	ei_local->reset_8390    = etherh_reset;
750	ei_local->block_input   = etherh_block_input;
751	ei_local->block_output  = etherh_block_output;
752	ei_local->get_8390_hdr  = etherh_get_header;
753	ei_local->interface_num = 0;
754
755	etherh_reset(dev);
756	__NS8390_init(dev, 0);
757
758	ret = register_netdev(dev);
759	if (ret)
760		goto free;
761
762	printk(KERN_INFO "%s: %s in slot %d, %pM\n",
763		dev->name, data->name, ec->slot_no, dev->dev_addr);
764
765	ecard_set_drvdata(ec, dev);
766
767	return 0;
768
769 free:
770	free_netdev(dev);
771 release:
772	ecard_release_resources(ec);
773 out:
774	return ret;
775}
776
/*
 * Tear down one EtherH/EtherM card: clear drvdata, unregister and free
 * the net_device, then return the expansion-card resources.
 */
777static void __devexit etherh_remove(struct expansion_card *ec)
778{
779	struct net_device *dev = ecard_get_drvdata(ec);
780
781	ecard_set_drvdata(ec, NULL);
782
783	unregister_netdev(dev);
784
785	free_netdev(dev);
786
787	ecard_release_resources(ec);
788}
789
790static struct etherh_data etherm_data = {
791 .ns8390_offset = ETHERM_NS8390,
792 .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT,
793 .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT,
794 .name = "ANT EtherM",
795 .supported = SUPPORTED_10baseT_Half,
796 .tx_start_page = ETHERM_TX_START_PAGE,
797 .stop_page = ETHERM_STOP_PAGE,
798};
799
800static struct etherh_data etherlan500_data = {
801 .ns8390_offset = ETHERH500_NS8390,
802 .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT,
803 .ctrlport_offset = ETHERH500_CTRLPORT,
804 .ctrl_ioc = 1,
805 .name = "i3 EtherH 500",
806 .supported = SUPPORTED_10baseT_Half,
807 .tx_start_page = ETHERH_TX_START_PAGE,
808 .stop_page = ETHERH_STOP_PAGE,
809};
810
811static struct etherh_data etherlan600_data = {
812 .ns8390_offset = ETHERH600_NS8390,
813 .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
814 .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
815 .name = "i3 EtherH 600",
816 .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
817 .tx_start_page = ETHERH_TX_START_PAGE,
818 .stop_page = ETHERH_STOP_PAGE,
819};
820
821static struct etherh_data etherlan600a_data = {
822 .ns8390_offset = ETHERH600_NS8390,
823 .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
824 .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
825 .name = "i3 EtherH 600A",
826 .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
827 .tx_start_page = ETHERH_TX_START_PAGE,
828 .stop_page = ETHERH_STOP_PAGE,
829};
830
831static const struct ecard_id etherh_ids[] = {
832 { MANU_ANT, PROD_ANT_ETHERM, &etherm_data },
833 { MANU_I3, PROD_I3_ETHERLAN500, &etherlan500_data },
834 { MANU_I3, PROD_I3_ETHERLAN600, &etherlan600_data },
835 { MANU_I3, PROD_I3_ETHERLAN600A, &etherlan600a_data },
836 { 0xffff, 0xffff }
837};
838
839static struct ecard_driver etherh_driver = {
840 .probe = etherh_probe,
841 .remove = __devexit_p(etherh_remove),
842 .id_table = etherh_ids,
843 .drv = {
844 .name = DRV_NAME,
845 },
846};
847
/*
 * Module entry point: precompute the 8390 register offset tables
 * (EtherH registers are 4 bytes apart, EtherM 32 bytes apart) and
 * register the ecard driver.
 */
848static int __init etherh_init(void)
849{
850	int i;
851
852	for (i = 0; i < 16; i++) {
853		etherh_regoffsets[i] = i << 2;
854		etherm_regoffsets[i] = i << 5;
855	}
856
857	return ecard_register_driver(&etherh_driver);
858}
859
/* Module exit point: unregister the driver, triggering remove per card. */
860static void __exit etherh_exit(void)
861{
862	ecard_remove_driver(&etherh_driver);
863}
864
865module_init(etherh_init);
866module_exit(etherh_exit);
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
new file mode 100644
index 00000000000..de51e8453c1
--- /dev/null
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -0,0 +1,1489 @@
1/*
2 * Intel IXP4xx Ethernet driver for Linux
3 *
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Ethernet port config (0x00 is not present on IXP42X):
11 *
12 * logical port 0x00 0x10 0x20
13 * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
14 * physical PortId 2 0 1
15 * TX queue 23 24 25
16 * RX-free queue 26 27 28
17 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
18 *
19 *
20 * Queue entries:
21 * bits 0 -> 1 - NPE ID (RX and TX-done)
22 * bits 0 -> 2 - priority (TX, per 802.1D)
23 * bits 3 -> 4 - port ID (user-set?)
24 * bits 5 -> 31 - physical descriptor address
25 */
26
27#include <linux/delay.h>
28#include <linux/dma-mapping.h>
29#include <linux/dmapool.h>
30#include <linux/etherdevice.h>
31#include <linux/io.h>
32#include <linux/kernel.h>
33#include <linux/net_tstamp.h>
34#include <linux/phy.h>
35#include <linux/platform_device.h>
36#include <linux/ptp_classify.h>
37#include <linux/slab.h>
38#include <mach/ixp46x_ts.h>
39#include <mach/npe.h>
40#include <mach/qmgr.h>
41
42#define DEBUG_DESC 0
43#define DEBUG_RX 0
44#define DEBUG_TX 0
45#define DEBUG_PKT_BYTES 0
46#define DEBUG_MDIO 0
47#define DEBUG_CLOSE 0
48
49#define DRV_NAME "ixp4xx_eth"
50
51#define MAX_NPES 3
52
53#define RX_DESCS 64 /* also length of all RX queues */
54#define TX_DESCS 16 /* also length of all TX queues */
55#define TXDONE_QUEUE_LEN 64 /* dwords */
56
57#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
58#define REGS_SIZE 0x1000
59#define MAX_MRU 1536 /* 0x600 */
60#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
61
62#define NAPI_WEIGHT 16
63#define MDIO_INTERVAL (3 * HZ)
64#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
65#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
66
67#define NPE_ID(port_id) ((port_id) >> 4)
68#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
69#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
70#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
71#define TXDONE_QUEUE 31
72
73#define PTP_SLAVE_MODE 1
74#define PTP_MASTER_MODE 2
75#define PORT2CHANNEL(p) NPE_ID(p->id)
76
77/* TX Control Registers */
78#define TX_CNTRL0_TX_EN 0x01
79#define TX_CNTRL0_HALFDUPLEX 0x02
80#define TX_CNTRL0_RETRY 0x04
81#define TX_CNTRL0_PAD_EN 0x08
82#define TX_CNTRL0_APPEND_FCS 0x10
83#define TX_CNTRL0_2DEFER 0x20
84#define TX_CNTRL0_RMII 0x40 /* reduced MII */
85#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
86
87/* RX Control Registers */
88#define RX_CNTRL0_RX_EN 0x01
89#define RX_CNTRL0_PADSTRIP_EN 0x02
90#define RX_CNTRL0_SEND_FCS 0x04
91#define RX_CNTRL0_PAUSE_EN 0x08
92#define RX_CNTRL0_LOOP_EN 0x10
93#define RX_CNTRL0_ADDR_FLTR_EN 0x20
94#define RX_CNTRL0_RX_RUNT_EN 0x40
95#define RX_CNTRL0_BCAST_DIS 0x80
96#define RX_CNTRL1_DEFER_EN 0x01
97
98/* Core Control Register */
99#define CORE_RESET 0x01
100#define CORE_RX_FIFO_FLUSH 0x02
101#define CORE_TX_FIFO_FLUSH 0x04
102#define CORE_SEND_JAM 0x08
103#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
104
105#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
106 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
107 TX_CNTRL0_2DEFER)
108#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
109#define DEFAULT_CORE_CNTRL CORE_MDC_EN
110
111
112/* NPE message codes */
113#define NPE_GETSTATUS 0x00
114#define NPE_EDB_SETPORTADDRESS 0x01
115#define NPE_EDB_GETMACADDRESSDATABASE 0x02
116#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
117#define NPE_GETSTATS 0x04
118#define NPE_RESETSTATS 0x05
119#define NPE_SETMAXFRAMELENGTHS 0x06
120#define NPE_VLAN_SETRXTAGMODE 0x07
121#define NPE_VLAN_SETDEFAULTRXVID 0x08
122#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
123#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
124#define NPE_VLAN_SETRXQOSENTRY 0x0B
125#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
126#define NPE_STP_SETBLOCKINGSTATE 0x0D
127#define NPE_FW_SETFIREWALLMODE 0x0E
128#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
129#define NPE_PC_SETAPMACTABLE 0x11
130#define NPE_SETLOOPBACK_MODE 0x12
131#define NPE_PC_SETBSSIDTABLE 0x13
132#define NPE_ADDRESS_FILTER_CONFIG 0x14
133#define NPE_APPENDFCSCONFIG 0x15
134#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
135#define NPE_MAC_RECOVERY_START 0x17
136
137
138#ifdef __ARMEB__
139typedef struct sk_buff buffer_t;
140#define free_buffer dev_kfree_skb
141#define free_buffer_irq dev_kfree_skb_irq
142#else
143typedef void buffer_t;
144#define free_buffer kfree
145#define free_buffer_irq kfree
146#endif
147
148struct eth_regs {
149 u32 tx_control[2], __res1[2]; /* 000 */
150 u32 rx_control[2], __res2[2]; /* 010 */
151 u32 random_seed, __res3[3]; /* 020 */
152 u32 partial_empty_threshold, __res4; /* 030 */
153 u32 partial_full_threshold, __res5; /* 038 */
154 u32 tx_start_bytes, __res6[3]; /* 040 */
155 u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
156 u32 tx_2part_deferral[2], __res8[2]; /* 060 */
157 u32 slot_time, __res9[3]; /* 070 */
158 u32 mdio_command[4]; /* 080 */
159 u32 mdio_status[4]; /* 090 */
160 u32 mcast_mask[6], __res10[2]; /* 0A0 */
161 u32 mcast_addr[6], __res11[2]; /* 0C0 */
162 u32 int_clock_threshold, __res12[3]; /* 0E0 */
163 u32 hw_addr[6], __res13[61]; /* 0F0 */
164 u32 core_control; /* 1FC */
165};
166
167struct port {
168 struct resource *mem_res;
169 struct eth_regs __iomem *regs;
170 struct npe *npe;
171 struct net_device *netdev;
172 struct napi_struct napi;
173 struct phy_device *phydev;
174 struct eth_plat_info *plat;
175 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
176 struct desc *desc_tab; /* coherent */
177 u32 desc_tab_phys;
178 int id; /* logical port ID */
179 int speed, duplex;
180 u8 firmware[4];
181 int hwts_tx_en;
182 int hwts_rx_en;
183};
184
185/* NPE message structure */
186struct msg {
187#ifdef __ARMEB__
188 u8 cmd, eth_id, byte2, byte3;
189 u8 byte4, byte5, byte6, byte7;
190#else
191 u8 byte3, byte2, eth_id, cmd;
192 u8 byte7, byte6, byte5, byte4;
193#endif
194};
195
196/* Ethernet packet descriptor */
197struct desc {
198 u32 next; /* pointer to next buffer, unused */
199
200#ifdef __ARMEB__
201 u16 buf_len; /* buffer length */
202 u16 pkt_len; /* packet length */
203 u32 data; /* pointer to data buffer in RAM */
204 u8 dest_id;
205 u8 src_id;
206 u16 flags;
207 u8 qos;
208 u8 padlen;
209 u16 vlan_tci;
210#else
211 u16 pkt_len; /* packet length */
212 u16 buf_len; /* buffer length */
213 u32 data; /* pointer to data buffer in RAM */
214 u16 flags;
215 u8 src_id;
216 u8 dest_id;
217 u16 vlan_tci;
218 u8 padlen;
219 u8 qos;
220#endif
221
222#ifdef __ARMEB__
223 u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
224 u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
225 u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
226#else
227 u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
228 u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
229 u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
230#endif
231};
232
233
234#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
235 (n) * sizeof(struct desc))
236#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
237
238#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
239 ((n) + RX_DESCS) * sizeof(struct desc))
240#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
241
242#ifndef __ARMEB__
243static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
244{
245 int i;
246 for (i = 0; i < cnt; i++)
247 dest[i] = swab32(src[i]);
248}
249#endif
250
251static spinlock_t mdio_lock;
252static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
253static struct mii_bus *mdio_bus;
254static int ports_open;
255static struct port *npe_port_tab[MAX_NPES];
256static struct dma_pool *dma_pool;
257
258static struct sock_filter ptp_filter[] = {
259 PTP_FILTER
260};
261
/*
 * Check whether skb is the PTPv1-over-UDP/IPv4 event frame identified by
 * the given source UUID (uid_hi = top 16 bits, uid_lo = low 32 bits) and
 * sequence ID.  Callers pass these in network byte order (see
 * ixp_rx_timestamp).  Returns non-zero on a match.
 */
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	/* only PTPv1 over UDP/IPv4 is handled by this hardware */
	if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
		return 0;

	/* offset of the PTP header within the frame */
	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	/* the low UUID word may be unaligned - copy it out byte-wise */
	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}
286
/*
 * Attach a hardware RX timestamp to skb if the IXP46x timesync unit has a
 * locked RX snapshot that matches this frame's PTP UUID and sequence ID.
 * The snapshot lock is always released at the end (even on a mismatch) so
 * the unit can capture the next frame.
 */
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	/* timesync channel follows the NPE number of this port */
	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	/* nothing captured for this channel */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	/* src_uuid_hi packs the UUID top half and the sequence id */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;	/* convert timer ticks to nanoseconds */

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* re-arm the snapshot logic */
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
328
/*
 * Report a hardware TX timestamp for skb via skb_tstamp_tx() if the caller
 * requested one and TX timestamping is enabled on this port.  The timesync
 * unit has no interrupt for this, so the snapshot register is polled for up
 * to 100us; on timeout the in-progress flag is cleared and no stamp is sent.
 */
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* timed out - no stamp will be delivered for this skb */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;	/* convert timer ticks to nanoseconds */

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* release the snapshot lock for the next packet */
	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
374
/*
 * SIOCSHWTSTAMP handler: configure hardware TX/RX timestamping from the
 * user-supplied hwtstamp_config.  Only PTPv1 L4 Sync (slave) and Delay_Req
 * (master) RX filters are supported.  Returns 0 on success, -EFAULT on bad
 * user pointers, -EINVAL for reserved flags, -ERANGE for unsupported modes.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		port->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		port->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* slave mode: stamp Sync messages */
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* master mode: stamp Delay_Req messages */
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
424
425static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
426 int write, u16 cmd)
427{
428 int cycles = 0;
429
430 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
431 printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
432 return -1;
433 }
434
435 if (write) {
436 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
437 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
438 }
439 __raw_writel(((phy_id << 5) | location) & 0xFF,
440 &mdio_regs->mdio_command[2]);
441 __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
442 &mdio_regs->mdio_command[3]);
443
444 while ((cycles < MAX_MDIO_RETRIES) &&
445 (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
446 udelay(1);
447 cycles++;
448 }
449
450 if (cycles == MAX_MDIO_RETRIES) {
451 printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
452 phy_id);
453 return -1;
454 }
455
456#if DEBUG_MDIO
457 printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
458 phy_id, write ? "write" : "read", cycles);
459#endif
460
461 if (write)
462 return 0;
463
464 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
465#if DEBUG_MDIO
466 printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
467 phy_id);
468#endif
469 return 0xFFFF; /* don't return error */
470 }
471
472 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
473 ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
474}
475
/* mii_bus read callback: serialized wrapper around ixp4xx_mdio_cmd(). */
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}
490
/* mii_bus write callback: serialized wrapper around ixp4xx_mdio_cmd(). */
static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}
506
507static int ixp4xx_mdio_register(void)
508{
509 int err;
510
511 if (!(mdio_bus = mdiobus_alloc()))
512 return -ENOMEM;
513
514 if (cpu_is_ixp43x()) {
515 /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
516 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
517 return -ENODEV;
518 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
519 } else {
520 /* All MII PHY accesses use NPE-B Ethernet registers */
521 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
522 return -ENODEV;
523 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
524 }
525
526 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
527 spin_lock_init(&mdio_lock);
528 mdio_bus->name = "IXP4xx MII Bus";
529 mdio_bus->read = &ixp4xx_mdio_read;
530 mdio_bus->write = &ixp4xx_mdio_write;
531 strcpy(mdio_bus->id, "0");
532
533 if ((err = mdiobus_register(mdio_bus)))
534 mdiobus_free(mdio_bus);
535 return err;
536}
537
/* Tear down the shared MDIO bus registered by ixp4xx_mdio_register(). */
static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}
543
544
/*
 * PHY link-change callback: cache the new speed/duplex, program the MAC's
 * half/full-duplex TX control bit, and log link transitions.  port->speed
 * doubles as a "link was up" flag (0 == down).
 */
static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phydev;

	if (!phydev->link) {
		if (port->speed) {
			/* link just went down - log once */
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	/* no change since last callback - nothing to reprogram */
	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}
574
575
/* Dump up to DEBUG_PKT_BYTES of a packet as hex (compiled out when
 * DEBUG_PKT_BYTES is 0).  Spaces are inserted after the MAC addresses and
 * ethertype to ease reading. */
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}
593
594
/* Dump one hardware descriptor (compiled out when DEBUG_DESC is 0). */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
609
/*
 * Pop one entry from hardware queue 'queue' and translate its physical
 * descriptor address into an index into the port's descriptor table
 * (TX half when is_tx, RX half otherwise).  Returns the index, or -1 if
 * the queue is empty.
 */
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);	/* chained descriptors are never used */
	return n_desc;
}
628
/* Push a descriptor's physical address onto hardware queue 'queue'.
 * phys must be 32-byte aligned (low bits carry queue metadata). */
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
638
639
/* Undo the TX dma_map_single() for a completed descriptor.  On little-endian
 * the mapping was made from a word-aligned bounce buffer, so the address and
 * length must be re-derived the same way eth_xmit() built them. */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
651
652
/* RX queue interrupt: mask further RX IRQs and hand work to NAPI. */
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}
664
/*
 * NAPI poll handler: drain up to 'budget' frames from the port's RX queue.
 * For each frame a replacement buffer is put back on the RX-free queue.
 * On big-endian a fresh skb is swapped in for the received one; on
 * little-endian the frame is byte-swap-copied out of the DMA buffer into a
 * newly allocated skb and the original buffer is reused.
 */
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			/* queue empty: complete NAPI, re-enable the IRQ,
			   then re-check for frames that raced in */
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule successed\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		/* allocate a replacement skb to refill the RX-free queue */
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		/* allocate an skb to copy the frame into */
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			/* allocation failed - drop the frame, recycle the
			   existing buffer */
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}
773
774
/*
 * TX-done queue interrupt, shared by all ports: for each completed
 * descriptor, account the packet, unmap and free its buffer, and return
 * the descriptor to the owning port's TX-ready queue.  Wakes the netdev
 * queue if TX-ready had drained empty.
 */
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		/* low 2 bits of the entry carry the NPE that sent it */
		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		/* sample emptiness before refilling to detect a wake-up */
		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}
822
/*
 * ndo_start_xmit: hand one skb to the NPE.  On big-endian the skb's own
 * data is DMA-mapped and the skb is kept until TX-done; on little-endian
 * the data is byte-swap-copied into a word-aligned kmalloc bounce buffer
 * and the skb is freed immediately.  Oversized frames are dropped.
 * Always returns NETDEV_TX_OK (drops count as errors, never busy).
 */
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* swap-copy from the word-aligned start of the skb data */
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* cannot fail: the netdev queue is stopped when TX-ready drains */
	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
914
915
/*
 * ndo_set_rx_mode: program the MAC's single addr/mask multicast filter.
 * IFF_ALLMULTI matches the multicast bit only; IFF_PROMISC or an empty
 * list disables filtering; otherwise addr/mask are set so they match every
 * address in the list (mask covers the bits common to all entries, which
 * may over-match - the filter is a superset, not exact).
 */
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	memset(diffs, 0, ETH_ALEN);

	/* accumulate which bits differ across the list */
	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	/* match bits that are identical in all addresses; ignore the rest */
	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}
958
959
/* ndo_do_ioctl: SIOCSHWTSTAMP on IXP46x goes to the timesync handler,
 * everything else is forwarded to the PHY layer. */
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	/* only IXP46x has the hardware timestamping unit */
	if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
		return hwtstamp_ioctl(dev, req, cmd);

	return phy_mii_ioctl(port->phydev, req, cmd);
}
972
973/* ethtool support */
974
975static void ixp4xx_get_drvinfo(struct net_device *dev,
976 struct ethtool_drvinfo *info)
977{
978 struct port *port = netdev_priv(dev);
979 strcpy(info->driver, DRV_NAME);
980 snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
981 port->firmware[0], port->firmware[1],
982 port->firmware[2], port->firmware[3]);
983 strcpy(info->bus_info, "internal");
984}
985
/* ethtool get_settings: delegated to the attached PHY. */
static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_gset(port->phydev, cmd);
}
991
/* ethtool set_settings: delegated to the attached PHY. */
static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_sset(port->phydev, cmd);
}
997
/* ethtool nway_reset: restart autonegotiation on the attached PHY. */
static int ixp4xx_nway_reset(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	return phy_start_aneg(port->phydev);
}
1003
/* ethtool operations table for ixp4xx ports. */
static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.get_settings = ixp4xx_get_settings,
	.set_settings = ixp4xx_set_settings,
	.nway_reset = ixp4xx_nway_reset,
	.get_link = ethtool_op_get_link,
};
1011
1012
/*
 * Claim the hardware queues a port needs (RX-free, RX, TX, TX-ready),
 * plus the global TX-done queue when the first port opens.  On failure,
 * everything claimed so far is released and the qmgr error is returned.
 */
static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
1058
/* Release the port's hardware queues; the shared TX-done queue is released
 * only when the last port closes (ports_open already decremented). */
static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}
1069
/*
 * Allocate the coherent descriptor table (shared DMA pool, created by the
 * first port to open) and the RX buffers, DMA-mapped for the NPE.  On a
 * mid-loop failure partially set-up state is left for the caller to undo
 * via destroy_queues() (see eth_open()).  Returns 0 or a negative errno.
 */
static int init_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		/* the NPE writes the frame NET_IP_ALIGN bytes in */
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
1113
/*
 * Undo init_queues(): unmap and free all RX/TX buffers, return the
 * descriptor table to the DMA pool, and destroy the pool when the last
 * port closes.  Safe to call on a partially initialized port.
 */
static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
1146
/* net_device open callback: load NPE firmware if the NPE is idle,
 * configure the NPE (RX queue mapping, MAC address, firewall mode) via
 * its message interface, allocate queues and buffers, program the MAC
 * registers, prime the RX-free/TX-ready queues, then enable NAPI and
 * the queue-manager IRQs.  Returns 0 or a negative errno.
 */
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		/* stash the firmware version reported by the NPE */
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	/* route all 8 QoS classes to this port's RX queue */
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	/* program MAC address and link-layer timing registers */
	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	/* the TX-done queue is shared; hook its IRQ only once */
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}
1252
/* net_device stop callback: quiesce the port, then reclaim every RX and
 * TX buffer still held by the NPE.  RX buffers are drained by switching
 * the NPE into loopback and, if needed, injecting dummy 1-byte frames so
 * the NPE keeps cycling descriptors back to us.  Finally the queues and
 * the DMA pool are torn down.  Always returns 0.
 */
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	/* reclaim RX buffers still sitting on the free queue */
	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();	/* descriptor visible before handing to NPE */
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	/* wait for in-flight TX descriptors to reach the ready queue */
	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	/* msg still holds NPE_SETLOOPBACK_MODE from above; just clear flag */
	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	/* TXDONE IRQ is shared; disable only when the last port closes */
	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
1337
/* net_device callback table for all IXP4xx Ethernet ports */
static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_multicast_list = eth_set_mcast_list,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
1348
/* Platform probe: allocate the net_device, map the per-NPE register
 * block, claim the NPE and its MMIO region, reset the MAC core, connect
 * the PHY and register the netdev.  Error paths unwind in strict
 * reverse order of acquisition.  Returns 0 or a negative errno.
 */
static int __devinit eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err;

	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
		pr_err("ixp4xx_eth: bad ptp filter\n");
		return -EINVAL;
	}

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	/* pdev->id selects which NPE's register window this port uses */
	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENODEV;
		goto err_free;
	}

	dev->netdev_ops = &ixp4xx_netdev_ops;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	/* publish the port so the shared MDIO/TXDONE code can find it */
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	/* pulse the core-reset bit, then release it */
	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		err = PTR_ERR(port->phydev);
		goto err_free_mem;
	}

	port->phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev)))
		goto err_phy_dis;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(port->phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}
1448
/* Platform remove: tear down in reverse order of eth_init_one(). */
static int __devexit eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(port->phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;	/* drop global lookup entry */
	platform_set_drvdata(pdev, NULL);
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}
1463
/* Platform driver glue; one platform device exists per Ethernet port */
static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};
1469
1470static int __init eth_init_module(void)
1471{
1472 int err;
1473 if ((err = ixp4xx_mdio_register()))
1474 return err;
1475 return platform_driver_register(&ixp4xx_eth_driver);
1476}
1477
/* Module exit: unregister the platform driver, then remove the MDIO bus
 * (reverse of eth_init_module()). */
static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}
1483
/* Module metadata and entry points */
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
new file mode 100644
index 00000000000..c827a6097d0
--- /dev/null
+++ b/drivers/net/arm/ks8695net.c
@@ -0,0 +1,1656 @@
1/*
2 * Micrel KS8695 (Centaur) Ethernet.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * Copyright 2008 Simtec Electronics
15 * Daniel Silverstone <dsilvers@simtec.co.uk>
16 * Vincent Sanders <vince@simtec.co.uk>
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/ethtool.h>
31#include <linux/delay.h>
32#include <linux/platform_device.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/slab.h>
36
37#include <asm/irq.h>
38
39#include <mach/regs-switch.h>
40#include <mach/regs-misc.h>
41#include <asm/mach/irq.h>
42#include <mach/regs-irq.h>
43
44#include "ks8695net.h"
45
46#define MODULENAME "ks8695_ether"
47#define MODULEVERSION "1.02"
48
/*
 * Transmit and device reset timeout, default 5 seconds
 * (in milliseconds; used as an msleep(1) iteration count in
 * ks8695_reset()).
 */
static int watchdog = 5000;
53
54/* Hardware structures */
55
/**
 * struct rx_ring_desc - Receive descriptor ring element
 * @status: The status of the descriptor element (E.g. who owns it)
 * @length: The number of bytes in the block pointed to by data_ptr
 * @data_ptr: The physical address of the data block to receive into
 * @next_desc: The physical address of the next descriptor element.
 *
 * Layout is fixed by the KS8695 DMA engine; all fields little-endian.
 */
struct rx_ring_desc {
	__le32	status;
	__le32	length;
	__le32	data_ptr;
	__le32	next_desc;
};
69
/**
 * struct tx_ring_desc - Transmit descriptor ring element
 * @owner: Who owns the descriptor
 * @status: Control flags and the number of bytes in the block
 *          pointed to by data_ptr
 * @data_ptr: The physical address of the data block to transmit from
 * @next_desc: The physical address of the next descriptor element.
 *
 * Layout is fixed by the KS8695 DMA engine; all fields little-endian.
 */
struct tx_ring_desc {
	__le32	owner;
	__le32	status;
	__le32	data_ptr;
	__le32	next_desc;
};
83
/**
 * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
 * @skb: The buffer in the ring
 * @dma_ptr: The mapped DMA pointer of the buffer
 * @length: The number of bytes mapped to dma_ptr
 *
 * CPU-side bookkeeping paired 1:1 with each hardware ring descriptor.
 */
struct ks8695_skbuff {
	struct sk_buff	*skb;
	dma_addr_t	dma_ptr;
	u32		length;
};
95
/* Private device structure */

/* TX ring: 8 descriptors; ring index wraps with MAX_TX_DESC_MASK */
#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
/* RX ring: 16 descriptors; ring index wraps with MAX_RX_DESC_MASK */
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf

/* NAPI weight is best kept larger than the number of RX DMA buffers */
#define NAPI_WEIGHT 64

/* Size of each hardware RX buffer (0x700 = 1792 bytes) */
#define MAX_RXBUF_SIZE 0x700

/* Sizes of the single DMA-coherent region holding both rings */
#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
111
/**
 * enum ks8695_dtype - Device type
 * @KS8695_DTYPE_WAN: This device is a WAN interface
 * @KS8695_DTYPE_LAN: This device is a LAN interface
 * @KS8695_DTYPE_HPNA: This device is an HPNA interface
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,
	KS8695_DTYPE_LAN,
	KS8695_DTYPE_HPNA,
};
123
/**
 * struct ks8695_priv - Private data for the KS8695 Ethernet
 * @in_suspend: Flag to indicate if we're suspending/resuming
 * @ndev: The net_device for this interface
 * @dev: The platform device object for this interface
 * @dtype: The type of this device
 * @io_regs: The ioremapped registers for this interface
 * @napi: The NAPI context used for RX processing
 * @rx_irq_name: The textual name of the RX IRQ from the platform data
 * @tx_irq_name: The textual name of the TX IRQ from the platform data
 * @link_irq_name: The textual name of the link IRQ from the
 *	platform data if available
 * @rx_irq: The IRQ number for the RX IRQ
 * @tx_irq: The IRQ number for the TX IRQ
 * @link_irq: The IRQ number for the link IRQ if available (-1 otherwise)
 * @regs_req: The resource request for the registers region
 * @phyiface_req: The resource request for the phy/switch region
 *	if available
 * @phyiface_regs: The ioremapped registers for the phy/switch if available
 * @ring_base: The base pointer of the dma coherent memory for the rings
 * @ring_base_dma: The DMA mapped equivalent of ring_base
 * @tx_ring: The pointer in ring_base of the TX ring
 * @tx_ring_used: The number of slots in the TX ring which are occupied
 * @tx_ring_next_slot: The next slot to fill in the TX ring
 * @tx_ring_dma: The DMA mapped equivalent of tx_ring
 * @tx_buffers: The sk_buff mappings for the TX ring
 * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables
 * @rx_ring: The pointer in ring_base of the RX ring
 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
 * @rx_buffers: The sk_buff mappings for the RX ring
 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
 * @rx_lock: A lock to protect the Rx irq function
 * @msg_enable: The flags for which messages to emit
 */
struct ks8695_priv {
	int in_suspend;
	struct net_device *ndev;
	struct device *dev;
	enum ks8695_dtype dtype;
	void __iomem *io_regs;

	struct napi_struct	napi;

	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
	int rx_irq, tx_irq, link_irq;

	struct resource *regs_req, *phyiface_req;
	void __iomem *phyiface_regs;

	void *ring_base;
	dma_addr_t ring_base_dma;

	struct tx_ring_desc *tx_ring;
	int tx_ring_used;
	int tx_ring_next_slot;
	dma_addr_t tx_ring_dma;
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
	spinlock_t txq_lock;

	struct rx_ring_desc *rx_ring;
	dma_addr_t rx_ring_dma;
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
	int next_rx_desc_read;
	spinlock_t rx_lock;

	int msg_enable;
};
191
192/* Register access */
193
194/**
195 * ks8695_readreg - Read from a KS8695 ethernet register
196 * @ksp: The device to read from
197 * @reg: The register to read
198 */
199static inline u32
200ks8695_readreg(struct ks8695_priv *ksp, int reg)
201{
202 return readl(ksp->io_regs + reg);
203}
204
205/**
206 * ks8695_writereg - Write to a KS8695 ethernet register
207 * @ksp: The device to write to
208 * @reg: The register to write
209 * @value: The value to write to the register
210 */
211static inline void
212ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
213{
214 writel(value, ksp->io_regs + reg);
215}
216
217/* Utility functions */
218
219/**
220 * ks8695_port_type - Retrieve port-type as user-friendly string
221 * @ksp: The device to return the type for
222 *
223 * Returns a string indicating which of the WAN, LAN or HPNA
224 * ports this device is likely to represent.
225 */
226static const char *
227ks8695_port_type(struct ks8695_priv *ksp)
228{
229 switch (ksp->dtype) {
230 case KS8695_DTYPE_LAN:
231 return "LAN";
232 case KS8695_DTYPE_WAN:
233 return "WAN";
234 case KS8695_DTYPE_HPNA:
235 return "HPNA";
236 }
237
238 return "UNKNOWN";
239}
240
241/**
242 * ks8695_update_mac - Update the MAC registers in the device
243 * @ksp: The device to update
244 *
245 * Updates the MAC registers in the KS8695 device from the address in the
246 * net_device structure associated with this interface.
247 */
248static void
249ks8695_update_mac(struct ks8695_priv *ksp)
250{
251 /* Update the HW with the MAC from the net_device */
252 struct net_device *ndev = ksp->ndev;
253 u32 machigh, maclow;
254
255 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
256 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
257 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
258
259 ks8695_writereg(ksp, KS8695_MAL, maclow);
260 ks8695_writereg(ksp, KS8695_MAH, machigh);
261
262}
263
/**
 * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
 * @ksp: The device to refill
 *
 * Iterates the RX ring of the device looking for empty slots.
 * For each empty slot, we allocate and map a new SKB and give it
 * to the hardware.
 * This can be called from interrupt context safely.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* Run around the RX ring, filling in any missing sk_buff's */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			skb->dev = ksp->ndev;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			/* Descriptor fields must be visible before
			 * ownership is transferred to the hardware */
			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
317
/* Maximum number of multicast addresses which the KS8695 HW supports */
#define KS8695_NR_ADDRESSES	16

/**
 * ks8695_init_partial_multicast - Init the mcast addr registers
 * @ksp: The device to initialise
 * @ndev: The net_device whose multicast list to program
 *
 * This routine is a helper for ks8695_set_multicast - it writes
 * the additional-address registers in the KS8695 ethernet device
 * and cleans up any others left behind.
 *
 * The caller must guarantee the list holds at most KS8695_NR_ADDRESSES
 * entries; exceeding that triggers a BUG().
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct net_device *ndev)
{
	u32 low, high;
	int i;
	struct netdev_hw_addr *ha;

	i = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* Ran out of space in chip? */
		BUG_ON(i == KS8695_NR_ADDRESSES);

		low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
		      (ha->addr[4] << 8) | (ha->addr[5]);
		high = (ha->addr[0] << 8) | (ha->addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
		i++;
	}

	/* Clear the remaining Additional Station Addresses */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}
359
360/* Interrupt handling */
361
/**
 * ks8695_tx_irq - Transmit IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Process the TX ring, clearing out any transmitted slots.
 * Allows the net_device to pass us new packets once slots are
 * freed.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			/* order the ownership read before reading the
			 * rest of the descriptor */
			rmb();
			/* An SKB which is not owned by HW is present */
			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	/* slots were (possibly) freed; let the stack transmit again */
	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
405
/**
 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
 * @ksp: Private data for the KS8695 Ethernet
 *
 * For KS8695 document:
 * Interrupt Enable Register (offset 0xE204)
 *	Bit29 : WAN MAC Receive Interrupt Enable
 *	Bit16 : LAN MAC Receive Interrupt Enable
 * Interrupt Status Register (Offset 0xF208)
 *	Bit29: WAN MAC Receive Status
 *	Bit16: LAN MAC Receive Status
 * So, this Rx interrupt enable/status bit number is equal
 * to the Rx IRQ number.
 */
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}
424
/**
 * ks8695_rx_irq - Receive IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * Inform NAPI that packet reception needs to be scheduled.
 * The RX interrupt is masked here and re-enabled by ks8695_poll()
 * once the ring has been drained; rx_lock serialises the two against
 * each other.
 */

static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
		/*disable rx interrupt*/
		status &= ~mask_bit;
		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}
453
/**
 * ks8695_rx - Receive packets called by NAPI poll method
 * @ksp: Private data for the KS8695 Ethernet
 * @budget: Number of packets allowed to process
 *
 * Walks the RX ring from next_rx_desc_read, passing complete error-free
 * frames to the stack and recycling errored slots back to the hardware,
 * until the budget is used up or a hardware-owned descriptor is hit.
 * Returns the number of descriptors consumed (including errored ones).
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	while (received < budget
			&& ksp->rx_buffers[buff_n].skb
			&& (!(ksp->rx_ring[buff_n].status &
					cpu_to_le32(RDES_OWN)))) {
			/* order the ownership read before the rest of
			 * the descriptor fields */
			rmb();
			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

			/* Found an SKB which we own, this means we
			 * received a packet
			 */
			if ((flags & (RDES_FS | RDES_LS)) !=
			    (RDES_FS | RDES_LS)) {
				/* This packet is not the first and
				 * the last segment.  Therefore it is
				 * a "spanning" packet and we can't
				 * handle it
				 */
				goto rx_failure;
			}

			if (flags & (RDES_ES | RDES_RE)) {
				/* It's an error packet */
				ndev->stats.rx_errors++;
				if (flags & RDES_TL)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_RF)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_CE)
					ndev->stats.rx_crc_errors++;
				if (flags & RDES_RE)
					ndev->stats.rx_missed_errors++;

				goto rx_failure;
			}

			pktlen = flags & RDES_FLEN;
			pktlen -= 4; /* Drop the CRC */

			/* Retrieve the sk_buff */
			skb = ksp->rx_buffers[buff_n].skb;

			/* Clear it from the ring */
			ksp->rx_buffers[buff_n].skb = NULL;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);

			/* Relinquish the SKB to the network layer */
			skb_put(skb, pktlen);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);

			/* Record stats */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pktlen;
			goto rx_finished;

rx_failure:
			/* This ring entry is an error, but we can
			 * re-use the skb
			 */
			/* Give the ring entry back to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
			received++;
			buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	}

	/* And note which RX descriptor we last did */
	ksp->next_rx_desc_read = buff_n;

	/* And refill the buffers */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine, in case it became suspended */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return received;
}
552
553
/**
 * ks8695_poll - Receive packet by NAPI poll method
 * @napi: The NAPI context of the device being polled
 * @budget: The remaining number packets for network subsystem
 *
 * Invoked by the network core when it requests for new
 * packets from the driver.  Completes NAPI and re-enables the RX
 * interrupt (under rx_lock, pairing with ks8695_rx_irq) once less
 * than the full budget was consumed.
 */
static int ks8695_poll(struct napi_struct *napi, int budget)
{
	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
	unsigned long  work_done;

	/* snapshot the interrupt-enable mask before processing */
	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

	work_done = ks8695_rx(ksp, budget);

	if (work_done < budget) {
		unsigned long flags;
		spin_lock_irqsave(&ksp->rx_lock, flags);
		__napi_complete(napi);
		/*enable rx interrupt*/
		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
		spin_unlock_irqrestore(&ksp->rx_lock, flags);
	}
	return work_done;
}
582
/**
 * ks8695_link_irq - Link change IRQ handler
 * @irq: The IRQ which went off (ignored)
 * @dev_id: The net_device for the interrupt
 *
 * The WAN interface can generate an IRQ when the link changes,
 * report this to the net layer and the user.  Link state, speed and
 * duplex are read from the WAN Miscellaneous Control register in the
 * phy/switch register window.
 */
static irqreturn_t
ks8695_link_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if (ctrl & WMC_WLS) {
		netif_carrier_on(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev,
				 "%s: Link is now up (10%sMbps/%s-duplex)\n",
				 ndev->name,
				 (ctrl & WMC_WSS) ? "0" : "",
				 (ctrl & WMC_WDS) ? "Full" : "Half");
	} else {
		netif_carrier_off(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev, "%s: Link is now down.\n",
				 ndev->name);
	}

	return IRQ_HANDLED;
}
616
617
618/* KS8695 Device functions */
619
/**
 * ks8695_reset - Reset a KS8695 ethernet interface
 * @ksp: The interface to reset
 *
 * Perform an engine reset of the interface and re-program it
 * with sensible defaults.  The reset is polled at 1 ms intervals for
 * up to 'watchdog' iterations; on timeout a message is logged and
 * programming proceeds anyway.
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;
	/* Issue the reset via the TX DMA control register */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	/* reset_timeout reaches -1 only when the loop never broke out */
	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines
	 */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad and add CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
655
/**
 * ks8695_shutdown - Shut down a KS8695 ethernet interface
 * @ksp: The interface to shut down
 *
 * This disables packet RX/TX, cleans up IRQs, drains the rings,
 * and basically places the interface into a clean shutdown
 * state.  Reverses ks8695_init_net().
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX packets */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Remove this SKB from the TX ring */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin this SKB */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Purge the RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Remove the SKB from the RX ring */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}
719
720
721/**
722 * ks8695_setup_irq - IRQ setup helper function
723 * @irq: The IRQ number to claim
724 * @irq_name: The name to give the IRQ claimant
725 * @handler: The function to call to handle the IRQ
726 * @ndev: The net_device to pass in as the dev_id argument to the handler
727 *
728 * Return 0 on success.
729 */
730static int
731ks8695_setup_irq(int irq, const char *irq_name,
732 irq_handler_t handler, struct net_device *ndev)
733{
734 int ret;
735
736 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
737
738 if (ret) {
739 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
740 return ret;
741 }
742
743 return 0;
744}
745
746/**
747 * ks8695_init_net - Initialise a KS8695 ethernet interface
748 * @ksp: The interface to initialise
749 *
750 * This routine fills the RX ring, initialises the DMA engines,
751 * allocates the IRQs and then starts the packet TX and RX
752 * engines.
753 */
754static int
755ks8695_init_net(struct ks8695_priv *ksp)
756{
757 int ret;
758 u32 ctrl;
759
760 ks8695_refill_rxbuffers(ksp);
761
762 /* Initialise the DMA engines */
763 ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
764 ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
765
766 /* Request the IRQs */
767 ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
768 ks8695_rx_irq, ksp->ndev);
769 if (ret)
770 return ret;
771 ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
772 ks8695_tx_irq, ksp->ndev);
773 if (ret)
774 return ret;
775 if (ksp->link_irq != -1) {
776 ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
777 ks8695_link_irq, ksp->ndev);
778 if (ret)
779 return ret;
780 }
781
782 /* Set up the ring indices */
783 ksp->next_rx_desc_read = 0;
784 ksp->tx_ring_next_slot = 0;
785 ksp->tx_ring_used = 0;
786
787 /* Bring up transmission */
788 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
789 /* Enable packet transmission */
790 ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
791
792 /* Bring up the reception */
793 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
794 /* Enable packet reception */
795 ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
796 /* And start the DMA engine */
797 ks8695_writereg(ksp, KS8695_DRSC, 0);
798
799 /* All done */
800 return 0;
801}
802
803/**
804 * ks8695_release_device - HW resource release for KS8695 e-net
805 * @ksp: The device to be freed
806 *
807 * This unallocates io memory regions, dma-coherent regions etc
808 * which were allocated in ks8695_probe.
809 */
810static void
811ks8695_release_device(struct ks8695_priv *ksp)
812{
813 /* Unmap the registers */
814 iounmap(ksp->io_regs);
815 if (ksp->phyiface_regs)
816 iounmap(ksp->phyiface_regs);
817
818 /* And release the request */
819 release_resource(ksp->regs_req);
820 kfree(ksp->regs_req);
821 if (ksp->phyiface_req) {
822 release_resource(ksp->phyiface_req);
823 kfree(ksp->phyiface_req);
824 }
825
826 /* Free the ring buffers */
827 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
828 ksp->ring_base, ksp->ring_base_dma);
829}
830
831/* Ethtool support */
832
833/**
834 * ks8695_get_msglevel - Get the messages enabled for emission
835 * @ndev: The network device to read from
836 */
837static u32
838ks8695_get_msglevel(struct net_device *ndev)
839{
840 struct ks8695_priv *ksp = netdev_priv(ndev);
841
842 return ksp->msg_enable;
843}
844
845/**
846 * ks8695_set_msglevel - Set the messages enabled for emission
847 * @ndev: The network device to configure
848 * @value: The messages to set for emission
849 */
850static void
851ks8695_set_msglevel(struct net_device *ndev, u32 value)
852{
853 struct ks8695_priv *ksp = netdev_priv(ndev);
854
855 ksp->msg_enable = value;
856}
857
858/**
859 * ks8695_wan_get_settings - Get device-specific settings.
860 * @ndev: The network device to read settings from
861 * @cmd: The ethtool structure to read into
862 */
863static int
864ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
865{
866 struct ks8695_priv *ksp = netdev_priv(ndev);
867 u32 ctrl;
868
869 /* All ports on the KS8695 support these... */
870 cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
871 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
872 SUPPORTED_TP | SUPPORTED_MII);
873 cmd->transceiver = XCVR_INTERNAL;
874
875 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
876 cmd->port = PORT_MII;
877 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
878 cmd->phy_address = 0;
879
880 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
881 if ((ctrl & WMC_WAND) == 0) {
882 /* auto-negotiation is enabled */
883 cmd->advertising |= ADVERTISED_Autoneg;
884 if (ctrl & WMC_WANA100F)
885 cmd->advertising |= ADVERTISED_100baseT_Full;
886 if (ctrl & WMC_WANA100H)
887 cmd->advertising |= ADVERTISED_100baseT_Half;
888 if (ctrl & WMC_WANA10F)
889 cmd->advertising |= ADVERTISED_10baseT_Full;
890 if (ctrl & WMC_WANA10H)
891 cmd->advertising |= ADVERTISED_10baseT_Half;
892 if (ctrl & WMC_WANAP)
893 cmd->advertising |= ADVERTISED_Pause;
894 cmd->autoneg = AUTONEG_ENABLE;
895
896 ethtool_cmd_speed_set(cmd,
897 (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
898 cmd->duplex = (ctrl & WMC_WDS) ?
899 DUPLEX_FULL : DUPLEX_HALF;
900 } else {
901 /* auto-negotiation is disabled */
902 cmd->autoneg = AUTONEG_DISABLE;
903
904 ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
905 SPEED_100 : SPEED_10));
906 cmd->duplex = (ctrl & WMC_WANFF) ?
907 DUPLEX_FULL : DUPLEX_HALF;
908 }
909
910 return 0;
911}
912
913/**
914 * ks8695_wan_set_settings - Set device-specific settings.
915 * @ndev: The network device to configure
916 * @cmd: The settings to configure
917 */
918static int
919ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
920{
921 struct ks8695_priv *ksp = netdev_priv(ndev);
922 u32 ctrl;
923
924 if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
925 return -EINVAL;
926 if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
927 return -EINVAL;
928 if (cmd->port != PORT_MII)
929 return -EINVAL;
930 if (cmd->transceiver != XCVR_INTERNAL)
931 return -EINVAL;
932 if ((cmd->autoneg != AUTONEG_DISABLE) &&
933 (cmd->autoneg != AUTONEG_ENABLE))
934 return -EINVAL;
935
936 if (cmd->autoneg == AUTONEG_ENABLE) {
937 if ((cmd->advertising & (ADVERTISED_10baseT_Half |
938 ADVERTISED_10baseT_Full |
939 ADVERTISED_100baseT_Half |
940 ADVERTISED_100baseT_Full)) == 0)
941 return -EINVAL;
942
943 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
944
945 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
946 WMC_WANA10F | WMC_WANA10H);
947 if (cmd->advertising & ADVERTISED_100baseT_Full)
948 ctrl |= WMC_WANA100F;
949 if (cmd->advertising & ADVERTISED_100baseT_Half)
950 ctrl |= WMC_WANA100H;
951 if (cmd->advertising & ADVERTISED_10baseT_Full)
952 ctrl |= WMC_WANA10F;
953 if (cmd->advertising & ADVERTISED_10baseT_Half)
954 ctrl |= WMC_WANA10H;
955
956 /* force a re-negotiation */
957 ctrl |= WMC_WANR;
958 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
959 } else {
960 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
961
962 /* disable auto-negotiation */
963 ctrl |= WMC_WAND;
964 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
965
966 if (cmd->speed == SPEED_100)
967 ctrl |= WMC_WANF100;
968 if (cmd->duplex == DUPLEX_FULL)
969 ctrl |= WMC_WANFF;
970
971 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
972 }
973
974 return 0;
975}
976
977/**
978 * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
979 * @ndev: The network device to restart autoneotiation on
980 */
981static int
982ks8695_wan_nwayreset(struct net_device *ndev)
983{
984 struct ks8695_priv *ksp = netdev_priv(ndev);
985 u32 ctrl;
986
987 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
988
989 if ((ctrl & WMC_WAND) == 0)
990 writel(ctrl | WMC_WANR,
991 ksp->phyiface_regs + KS8695_WMC);
992 else
993 /* auto-negotiation not enabled */
994 return -EINVAL;
995
996 return 0;
997}
998
999/**
1000 * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
1001 * @ndev: The device to retrieve settings from
1002 * @param: The structure to fill out with the information
1003 */
1004static void
1005ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1006{
1007 struct ks8695_priv *ksp = netdev_priv(ndev);
1008 u32 ctrl;
1009
1010 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1011
1012 /* advertise Pause */
1013 param->autoneg = (ctrl & WMC_WANAP);
1014
1015 /* current Rx Flow-control */
1016 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1017 param->rx_pause = (ctrl & DRXC_RFCE);
1018
1019 /* current Tx Flow-control */
1020 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1021 param->tx_pause = (ctrl & DTXC_TFCE);
1022}
1023
1024/**
1025 * ks8695_get_drvinfo - Retrieve driver information
1026 * @ndev: The network device to retrieve info about
1027 * @info: The info structure to fill out.
1028 */
1029static void
1030ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1031{
1032 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1033 strlcpy(info->version, MODULEVERSION, sizeof(info->version));
1034 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1035 sizeof(info->bus_info));
1036}
1037
/* Minimal ethtool ops used for the LAN and HPNA ports, which have
 * no per-port PHY interface registers to query.
 */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};

/* Full ethtool ops for the WAN port, which exposes link settings
 * and pause parameters through the WMC register block.
 */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_wan_get_settings,
	.set_settings	= ks8695_wan_set_settings,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam = ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1054
1055/* Network device interface functions */
1056
1057/**
1058 * ks8695_set_mac - Update MAC in net dev and HW
1059 * @ndev: The network device to update
1060 * @addr: The new MAC address to set
1061 */
1062static int
1063ks8695_set_mac(struct net_device *ndev, void *addr)
1064{
1065 struct ks8695_priv *ksp = netdev_priv(ndev);
1066 struct sockaddr *address = addr;
1067
1068 if (!is_valid_ether_addr(address->sa_data))
1069 return -EADDRNOTAVAIL;
1070
1071 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1072
1073 ks8695_update_mac(ksp);
1074
1075 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1076 ndev->name, ndev->dev_addr);
1077
1078 return 0;
1079}
1080
1081/**
1082 * ks8695_set_multicast - Set up the multicast behaviour of the interface
1083 * @ndev: The net_device to configure
1084 *
1085 * This routine, called by the net layer, configures promiscuity
1086 * and multicast reception behaviour for the interface.
1087 */
1088static void
1089ks8695_set_multicast(struct net_device *ndev)
1090{
1091 struct ks8695_priv *ksp = netdev_priv(ndev);
1092 u32 ctrl;
1093
1094 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1095
1096 if (ndev->flags & IFF_PROMISC) {
1097 /* enable promiscuous mode */
1098 ctrl |= DRXC_RA;
1099 } else if (ndev->flags & ~IFF_PROMISC) {
1100 /* disable promiscuous mode */
1101 ctrl &= ~DRXC_RA;
1102 }
1103
1104 if (ndev->flags & IFF_ALLMULTI) {
1105 /* enable all multicast mode */
1106 ctrl |= DRXC_RM;
1107 } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
1108 /* more specific multicast addresses than can be
1109 * handled in hardware
1110 */
1111 ctrl |= DRXC_RM;
1112 } else {
1113 /* enable specific multicasts */
1114 ctrl &= ~DRXC_RM;
1115 ks8695_init_partial_multicast(ksp, ndev);
1116 }
1117
1118 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
1119}
1120
1121/**
1122 * ks8695_timeout - Handle a network tx/rx timeout.
1123 * @ndev: The net_device which timed out.
1124 *
1125 * A network transaction timed out, reset the device.
1126 */
1127static void
1128ks8695_timeout(struct net_device *ndev)
1129{
1130 struct ks8695_priv *ksp = netdev_priv(ndev);
1131
1132 netif_stop_queue(ndev);
1133 ks8695_shutdown(ksp);
1134
1135 ks8695_reset(ksp);
1136
1137 ks8695_update_mac(ksp);
1138
1139 /* We ignore the return from this since it managed to init
1140 * before it probably will be okay to init again.
1141 */
1142 ks8695_init_net(ksp);
1143
1144 /* Reconfigure promiscuity etc */
1145 ks8695_set_multicast(ndev);
1146
1147 /* And start the TX queue once more */
1148 netif_start_queue(ndev);
1149}
1150
1151/**
1152 * ks8695_start_xmit - Start a packet transmission
1153 * @skb: The packet to transmit
1154 * @ndev: The network device to send the packet on
1155 *
1156 * This routine, called by the net layer, takes ownership of the
1157 * sk_buff and adds it to the TX ring. It then kicks the TX DMA
1158 * engine to ensure transmission begins.
1159 */
1160static int
1161ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1162{
1163 struct ks8695_priv *ksp = netdev_priv(ndev);
1164 int buff_n;
1165 dma_addr_t dmap;
1166
1167 spin_lock_irq(&ksp->txq_lock);
1168
1169 if (ksp->tx_ring_used == MAX_TX_DESC) {
1170 /* Somehow we got entered when we have no room */
1171 spin_unlock_irq(&ksp->txq_lock);
1172 return NETDEV_TX_BUSY;
1173 }
1174
1175 buff_n = ksp->tx_ring_next_slot;
1176
1177 BUG_ON(ksp->tx_buffers[buff_n].skb);
1178
1179 dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
1180 if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
1181 /* Failed to DMA map this SKB, give it back for now */
1182 spin_unlock_irq(&ksp->txq_lock);
1183 dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
1184 "transmission, trying later\n", ndev->name);
1185 return NETDEV_TX_BUSY;
1186 }
1187
1188 ksp->tx_buffers[buff_n].dma_ptr = dmap;
1189 /* Mapped okay, store the buffer pointer and length for later */
1190 ksp->tx_buffers[buff_n].skb = skb;
1191 ksp->tx_buffers[buff_n].length = skb->len;
1192
1193 /* Fill out the TX descriptor */
1194 ksp->tx_ring[buff_n].data_ptr =
1195 cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
1196 ksp->tx_ring[buff_n].status =
1197 cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
1198 (skb->len & TDES_TBS));
1199
1200 wmb();
1201
1202 /* Hand it over to the hardware */
1203 ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
1204
1205 if (++ksp->tx_ring_used == MAX_TX_DESC)
1206 netif_stop_queue(ndev);
1207
1208 /* Kick the TX DMA in case it decided to go IDLE */
1209 ks8695_writereg(ksp, KS8695_DTSC, 0);
1210
1211 /* And update the next ring slot */
1212 ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
1213
1214 spin_unlock_irq(&ksp->txq_lock);
1215 return NETDEV_TX_OK;
1216}
1217
1218/**
1219 * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
1220 * @ndev: The net_device to stop
1221 *
1222 * This disables the TX queue and cleans up a KS8695 ethernet
1223 * device.
1224 */
1225static int
1226ks8695_stop(struct net_device *ndev)
1227{
1228 struct ks8695_priv *ksp = netdev_priv(ndev);
1229
1230 netif_stop_queue(ndev);
1231 napi_disable(&ksp->napi);
1232
1233 ks8695_shutdown(ksp);
1234
1235 return 0;
1236}
1237
1238/**
1239 * ks8695_open - Open (bring up) a KS8695 ethernet interface
1240 * @ndev: The net_device to open
1241 *
1242 * This resets, configures the MAC, initialises the RX ring and
1243 * DMA engines and starts the TX queue for a KS8695 ethernet
1244 * device.
1245 */
1246static int
1247ks8695_open(struct net_device *ndev)
1248{
1249 struct ks8695_priv *ksp = netdev_priv(ndev);
1250 int ret;
1251
1252 if (!is_valid_ether_addr(ndev->dev_addr))
1253 return -EADDRNOTAVAIL;
1254
1255 ks8695_reset(ksp);
1256
1257 ks8695_update_mac(ksp);
1258
1259 ret = ks8695_init_net(ksp);
1260 if (ret) {
1261 ks8695_shutdown(ksp);
1262 return ret;
1263 }
1264
1265 napi_enable(&ksp->napi);
1266 netif_start_queue(ndev);
1267
1268 return 0;
1269}
1270
1271/* Platform device driver */
1272
1273/**
1274 * ks8695_init_switch - Init LAN switch to known good defaults.
1275 * @ksp: The device to initialise
1276 *
1277 * This initialises the LAN switch in the KS8695 to a known-good
1278 * set of defaults.
1279 */
1280static void __devinit
1281ks8695_init_switch(struct ks8695_priv *ksp)
1282{
1283 u32 ctrl;
1284
1285 /* Default value for SEC0 according to datasheet */
1286 ctrl = 0x40819e00;
1287
1288 /* LED0 = Speed LED1 = Link/Activity */
1289 ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
1290 ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
1291
1292 /* Enable Switch */
1293 ctrl |= SEC0_ENABLE;
1294
1295 writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
1296
1297 /* Defaults for SEC1 */
1298 writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
1299}
1300
1301/**
1302 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
1303 * @ksp: The device to initialise
1304 *
1305 * This initialises a KS8695's WAN phy to sensible values for
1306 * autonegotiation etc.
1307 */
1308static void __devinit
1309ks8695_init_wan_phy(struct ks8695_priv *ksp)
1310{
1311 u32 ctrl;
1312
1313 /* Support auto-negotiation */
1314 ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
1315 WMC_WANA10F | WMC_WANA10H);
1316
1317 /* LED0 = Activity , LED1 = Link */
1318 ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
1319
1320 /* Restart Auto-negotiation */
1321 ctrl |= WMC_WANR;
1322
1323 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1324
1325 writel(0, ksp->phyiface_regs + KS8695_WPPM);
1326 writel(0, ksp->phyiface_regs + KS8695_PPS);
1327}
1328
/* net_device callbacks shared by all three port types (LAN, WAN,
 * HPNA); per-port differences live in the ethtool ops instead.
 */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ks8695_set_multicast,
};
1338
1339/**
1340 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
1341 * @pdev: The platform device to probe
1342 *
1343 * Initialise a KS8695 ethernet device from platform data.
1344 *
1345 * This driver requires at least one IORESOURCE_MEM for the
1346 * registers and two IORESOURCE_IRQ for the RX and TX IRQs
1347 * respectively. It can optionally take an additional
1348 * IORESOURCE_MEM for the switch or phy in the case of the lan or
1349 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
1350 * port.
1351 */
1352static int __devinit
1353ks8695_probe(struct platform_device *pdev)
1354{
1355 struct ks8695_priv *ksp;
1356 struct net_device *ndev;
1357 struct resource *regs_res, *phyiface_res;
1358 struct resource *rxirq_res, *txirq_res, *linkirq_res;
1359 int ret = 0;
1360 int buff_n;
1361 u32 machigh, maclow;
1362
1363 /* Initialise a net_device */
1364 ndev = alloc_etherdev(sizeof(struct ks8695_priv));
1365 if (!ndev) {
1366 dev_err(&pdev->dev, "could not allocate device.\n");
1367 return -ENOMEM;
1368 }
1369
1370 SET_NETDEV_DEV(ndev, &pdev->dev);
1371
1372 dev_dbg(&pdev->dev, "ks8695_probe() called\n");
1373
1374 /* Configure our private structure a little */
1375 ksp = netdev_priv(ndev);
1376
1377 ksp->dev = &pdev->dev;
1378 ksp->ndev = ndev;
1379 ksp->msg_enable = NETIF_MSG_LINK;
1380
1381 /* Retrieve resources */
1382 regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1383 phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1384
1385 rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1386 txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1387 linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1388
1389 if (!(regs_res && rxirq_res && txirq_res)) {
1390 dev_err(ksp->dev, "insufficient resources\n");
1391 ret = -ENOENT;
1392 goto failure;
1393 }
1394
1395 ksp->regs_req = request_mem_region(regs_res->start,
1396 resource_size(regs_res),
1397 pdev->name);
1398
1399 if (!ksp->regs_req) {
1400 dev_err(ksp->dev, "cannot claim register space\n");
1401 ret = -EIO;
1402 goto failure;
1403 }
1404
1405 ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
1406
1407 if (!ksp->io_regs) {
1408 dev_err(ksp->dev, "failed to ioremap registers\n");
1409 ret = -EINVAL;
1410 goto failure;
1411 }
1412
1413 if (phyiface_res) {
1414 ksp->phyiface_req =
1415 request_mem_region(phyiface_res->start,
1416 resource_size(phyiface_res),
1417 phyiface_res->name);
1418
1419 if (!ksp->phyiface_req) {
1420 dev_err(ksp->dev,
1421 "cannot claim switch register space\n");
1422 ret = -EIO;
1423 goto failure;
1424 }
1425
1426 ksp->phyiface_regs = ioremap(phyiface_res->start,
1427 resource_size(phyiface_res));
1428
1429 if (!ksp->phyiface_regs) {
1430 dev_err(ksp->dev,
1431 "failed to ioremap switch registers\n");
1432 ret = -EINVAL;
1433 goto failure;
1434 }
1435 }
1436
1437 ksp->rx_irq = rxirq_res->start;
1438 ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
1439 ksp->tx_irq = txirq_res->start;
1440 ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
1441 ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
1442 ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
1443 linkirq_res->name : "Ethernet Link";
1444
1445 /* driver system setup */
1446 ndev->netdev_ops = &ks8695_netdev_ops;
1447 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1448
1449 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1450
1451 /* Retrieve the default MAC addr from the chip. */
1452 /* The bootloader should have left it in there for us. */
1453
1454 machigh = ks8695_readreg(ksp, KS8695_MAH);
1455 maclow = ks8695_readreg(ksp, KS8695_MAL);
1456
1457 ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
1458 ndev->dev_addr[1] = machigh & 0xFF;
1459 ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
1460 ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
1461 ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
1462 ndev->dev_addr[5] = maclow & 0xFF;
1463
1464 if (!is_valid_ether_addr(ndev->dev_addr))
1465 dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
1466 "set using ifconfig\n", ndev->name);
1467
1468 /* In order to be efficient memory-wise, we allocate both
1469 * rings in one go.
1470 */
1471 ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
1472 &ksp->ring_base_dma, GFP_KERNEL);
1473 if (!ksp->ring_base) {
1474 ret = -ENOMEM;
1475 goto failure;
1476 }
1477
1478 /* Specify the TX DMA ring buffer */
1479 ksp->tx_ring = ksp->ring_base;
1480 ksp->tx_ring_dma = ksp->ring_base_dma;
1481
1482 /* And initialise the queue's lock */
1483 spin_lock_init(&ksp->txq_lock);
1484 spin_lock_init(&ksp->rx_lock);
1485
1486 /* Specify the RX DMA ring buffer */
1487 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
1488 ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;
1489
1490 /* Zero the descriptor rings */
1491 memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
1492 memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
1493
1494 /* Build the rings */
1495 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
1496 ksp->tx_ring[buff_n].next_desc =
1497 cpu_to_le32(ksp->tx_ring_dma +
1498 (sizeof(struct tx_ring_desc) *
1499 ((buff_n + 1) & MAX_TX_DESC_MASK)));
1500 }
1501
1502 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
1503 ksp->rx_ring[buff_n].next_desc =
1504 cpu_to_le32(ksp->rx_ring_dma +
1505 (sizeof(struct rx_ring_desc) *
1506 ((buff_n + 1) & MAX_RX_DESC_MASK)));
1507 }
1508
1509 /* Initialise the port (physically) */
1510 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1511 ks8695_init_switch(ksp);
1512 ksp->dtype = KS8695_DTYPE_LAN;
1513 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1514 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1515 ks8695_init_wan_phy(ksp);
1516 ksp->dtype = KS8695_DTYPE_WAN;
1517 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
1518 } else {
1519 /* No initialisation since HPNA does not have a PHY */
1520 ksp->dtype = KS8695_DTYPE_HPNA;
1521 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1522 }
1523
1524 /* And bring up the net_device with the net core */
1525 platform_set_drvdata(pdev, ndev);
1526 ret = register_netdev(ndev);
1527
1528 if (ret == 0) {
1529 dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
1530 ks8695_port_type(ksp), ndev->dev_addr);
1531 } else {
1532 /* Report the failure to register the net_device */
1533 dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
1534 goto failure;
1535 }
1536
1537 /* All is well */
1538 return 0;
1539
1540 /* Error exit path */
1541failure:
1542 ks8695_release_device(ksp);
1543 free_netdev(ndev);
1544
1545 return ret;
1546}
1547
1548/**
1549 * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
1550 * @pdev: The device to suspend
1551 * @state: The suspend state
1552 *
1553 * This routine detaches and shuts down a KS8695 ethernet device.
1554 */
1555static int
1556ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1557{
1558 struct net_device *ndev = platform_get_drvdata(pdev);
1559 struct ks8695_priv *ksp = netdev_priv(ndev);
1560
1561 ksp->in_suspend = 1;
1562
1563 if (netif_running(ndev)) {
1564 netif_device_detach(ndev);
1565 ks8695_shutdown(ksp);
1566 }
1567
1568 return 0;
1569}
1570
1571/**
1572 * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
1573 * @pdev: The device to resume
1574 *
1575 * This routine re-initialises and re-attaches a KS8695 ethernet
1576 * device.
1577 */
1578static int
1579ks8695_drv_resume(struct platform_device *pdev)
1580{
1581 struct net_device *ndev = platform_get_drvdata(pdev);
1582 struct ks8695_priv *ksp = netdev_priv(ndev);
1583
1584 if (netif_running(ndev)) {
1585 ks8695_reset(ksp);
1586 ks8695_init_net(ksp);
1587 ks8695_set_multicast(ndev);
1588 netif_device_attach(ndev);
1589 }
1590
1591 ksp->in_suspend = 0;
1592
1593 return 0;
1594}
1595
1596/**
1597 * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
1598 * @pdev: The platform device to remove
1599 *
1600 * This unregisters and releases a KS8695 ethernet device.
1601 */
1602static int __devexit
1603ks8695_drv_remove(struct platform_device *pdev)
1604{
1605 struct net_device *ndev = platform_get_drvdata(pdev);
1606 struct ks8695_priv *ksp = netdev_priv(ndev);
1607
1608 platform_set_drvdata(pdev, NULL);
1609 netif_napi_del(&ksp->napi);
1610
1611 unregister_netdev(ndev);
1612 ks8695_release_device(ksp);
1613 free_netdev(ndev);
1614
1615 dev_dbg(&pdev->dev, "released and freed device\n");
1616 return 0;
1617}
1618
/* Platform driver glue binding the probe/remove/PM callbacks to
 * devices named MODULENAME.
 */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1629
1630/* Module interface */
1631
/* Module init: announce the driver and register it with the
 * platform bus.
 */
static int __init
ks8695_init(void)
{
	printk(KERN_INFO "%s Ethernet driver, V%s\n",
	       MODULENAME, MODULEVERSION);

	return platform_driver_register(&ks8695_driver);
}

/* Module exit: unregister from the platform bus. */
static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}
1646
module_init(ks8695_init);
module_exit(ks8695_cleanup);

MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

/* TX watchdog timeout, read-only after load (mode 0400) */
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
diff --git a/drivers/net/arm/ks8695net.h b/drivers/net/arm/ks8695net.h
new file mode 100644
index 00000000000..80eff6ea516
--- /dev/null
+++ b/drivers/net/arm/ks8695net.h
@@ -0,0 +1,107 @@
1/*
2 * Micrel KS8695 (Centaur) Ethernet.
3 *
4 * Copyright 2008 Simtec Electronics
5 * Daniel Silverstone <dsilvers@simtec.co.uk>
6 * Vincent Sanders <vince@simtec.co.uk>
7 */
8
9#ifndef KS8695NET_H
10#define KS8695NET_H
11
12/* Receive descriptor flags */
13#define RDES_OWN (1 << 31) /* Ownership */
14#define RDES_FS (1 << 30) /* First Descriptor */
15#define RDES_LS (1 << 29) /* Last Descriptor */
16#define RDES_IPE (1 << 28) /* IP Checksum error */
17#define RDES_TCPE (1 << 27) /* TCP Checksum error */
18#define RDES_UDPE (1 << 26) /* UDP Checksum error */
19#define RDES_ES (1 << 25) /* Error summary */
20#define RDES_MF (1 << 24) /* Multicast Frame */
21#define RDES_RE (1 << 19) /* MII Error reported */
22#define RDES_TL (1 << 18) /* Frame too Long */
23#define RDES_RF (1 << 17) /* Runt Frame */
24#define RDES_CE (1 << 16) /* CRC error */
25#define RDES_FT (1 << 15) /* Frame Type */
26#define RDES_FLEN (0x7ff) /* Frame Length */
27
28#define RDES_RER (1 << 25) /* Receive End of Ring */
29#define RDES_RBS (0x7ff) /* Receive Buffer Size */
30
31/* Transmit descriptor flags */
32
33#define TDES_OWN (1 << 31) /* Ownership */
34
35#define TDES_IC (1 << 31) /* Interrupt on Completion */
36#define TDES_FS (1 << 30) /* First Segment */
37#define TDES_LS (1 << 29) /* Last Segment */
38#define TDES_IPCKG (1 << 28) /* IP Checksum generate */
39#define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */
40#define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */
41#define TDES_TER (1 << 25) /* Transmit End of Ring */
42#define TDES_TBS (0x7ff) /* Transmit Buffer Size */
43
44/*
45 * Network controller register offsets
46 */
47#define KS8695_DTXC (0x00) /* DMA Transmit Control */
48#define KS8695_DRXC (0x04) /* DMA Receive Control */
49#define KS8695_DTSC (0x08) /* DMA Transmit Start Command */
50#define KS8695_DRSC (0x0c) /* DMA Receive Start Command */
51#define KS8695_TDLB (0x10) /* Transmit Descriptor List
52 * Base Address
53 */
54#define KS8695_RDLB (0x14) /* Receive Descriptor List
55 * Base Address
56 */
57#define KS8695_MAL (0x18) /* MAC Station Address Low */
58#define KS8695_MAH (0x1c) /* MAC Station Address High */
59#define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional
60 * Station Address
61 * (0..15) Low
62 */
63#define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional
64 * Station Address
65 * (0..15) High
66 */
67
68
69/* DMA Transmit Control Register */
70#define DTXC_TRST (1 << 31) /* Soft Reset */
71#define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */
72#define DTXC_TUCG (1 << 18) /* Transmit UDP
73 * Checksum Generate
74 */
75#define DTXC_TTCG (1 << 17) /* Transmit TCP
76 * Checksum Generate
77 */
78#define DTXC_TICG (1 << 16) /* Transmit IP
79 * Checksum Generate
80 */
81#define DTXC_TFCE (1 << 9) /* Transmit Flow
82 * Control Enable
83 */
84#define DTXC_TLB (1 << 8) /* Loopback mode */
85#define DTXC_TEP (1 << 2) /* Transmit Enable Padding */
86#define DTXC_TAC (1 << 1) /* Transmit Add CRC */
87#define DTXC_TE (1 << 0) /* TX Enable */
88
89/* DMA Receive Control Register */
90#define DRXC_RBS (0x3f << 24) /* Receive Burst Size */
91#define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */
92#define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */
93#define DRXC_RICG (1 << 16) /* Receive IP Checksum check */
94#define DRXC_RFCE (1 << 9) /* Receive Flow Control
95 * Enable
96 */
97#define DRXC_RB (1 << 6) /* Receive Broadcast */
98#define DRXC_RM (1 << 5) /* Receive Multicast */
99#define DRXC_RU (1 << 4) /* Receive Unicast */
100#define DRXC_RERR (1 << 3) /* Receive Error Frame */
101#define DRXC_RA (1 << 2) /* Receive All */
102#define DRXC_RE (1 << 0) /* RX Enable */
103
104/* Additional Station Address High */
105#define AAH_E (1 << 31) /* Address Enabled */
106
107#endif /* KS8695NET_H */
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
new file mode 100644
index 00000000000..bfea499a351
--- /dev/null
+++ b/drivers/net/arm/w90p910_ether.c
@@ -0,0 +1,1123 @@
1/*
2 * Copyright (c) 2008-2009 Nuvoton technology corporation.
3 *
4 * Wan ZongShun <mcuos.com@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation;version 2 of the License.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/mii.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/skbuff.h>
18#include <linux/ethtool.h>
19#include <linux/platform_device.h>
20#include <linux/clk.h>
21#include <linux/gfp.h>
22
23#define DRV_MODULE_NAME "w90p910-emc"
24#define DRV_MODULE_VERSION "0.1"
25
26/* Ethernet MAC Registers */
27#define REG_CAMCMR 0x00
28#define REG_CAMEN 0x04
29#define REG_CAMM_BASE 0x08
30#define REG_CAML_BASE 0x0c
31#define REG_TXDLSA 0x88
32#define REG_RXDLSA 0x8C
33#define REG_MCMDR 0x90
34#define REG_MIID 0x94
35#define REG_MIIDA 0x98
36#define REG_FFTCR 0x9C
37#define REG_TSDR 0xa0
38#define REG_RSDR 0xa4
39#define REG_DMARFC 0xa8
40#define REG_MIEN 0xac
41#define REG_MISTA 0xb0
42#define REG_CTXDSA 0xcc
43#define REG_CTXBSA 0xd0
44#define REG_CRXDSA 0xd4
45#define REG_CRXBSA 0xd8
46
47/* mac controller bit */
48#define MCMDR_RXON 0x01
49#define MCMDR_ACP (0x01 << 3)
50#define MCMDR_SPCRC (0x01 << 5)
51#define MCMDR_TXON (0x01 << 8)
52#define MCMDR_FDUP (0x01 << 18)
53#define MCMDR_ENMDC (0x01 << 19)
54#define MCMDR_OPMOD (0x01 << 20)
55#define SWR (0x01 << 24)
56
57/* cam command regiser */
58#define CAMCMR_AUP 0x01
59#define CAMCMR_AMP (0x01 << 1)
60#define CAMCMR_ABP (0x01 << 2)
61#define CAMCMR_CCAM (0x01 << 3)
62#define CAMCMR_ECMP (0x01 << 4)
63#define CAM0EN 0x01
64
65/* mac mii controller bit */
66#define MDCCR (0x0a << 20)
67#define PHYAD (0x01 << 8)
68#define PHYWR (0x01 << 16)
69#define PHYBUSY (0x01 << 17)
70#define PHYPRESP (0x01 << 18)
71#define CAM_ENTRY_SIZE 0x08
72
73/* rx and tx status */
74#define TXDS_TXCP (0x01 << 19)
75#define RXDS_CRCE (0x01 << 17)
76#define RXDS_PTLE (0x01 << 19)
77#define RXDS_RXGD (0x01 << 20)
78#define RXDS_ALIE (0x01 << 21)
79#define RXDS_RP (0x01 << 22)
80
81/* mac interrupt status*/
82#define MISTA_EXDEF (0x01 << 19)
83#define MISTA_TXBERR (0x01 << 24)
84#define MISTA_TDU (0x01 << 23)
85#define MISTA_RDU (0x01 << 10)
86#define MISTA_RXBERR (0x01 << 11)
87
88#define ENSTART 0x01
89#define ENRXINTR 0x01
90#define ENRXGD (0x01 << 4)
91#define ENRXBERR (0x01 << 11)
92#define ENTXINTR (0x01 << 16)
93#define ENTXCP (0x01 << 18)
94#define ENTXABT (0x01 << 21)
95#define ENTXBERR (0x01 << 24)
96#define ENMDC (0x01 << 19)
97#define PHYBUSY (0x01 << 17)
98#define MDCCR_VAL 0xa00000
99
100/* rx and tx owner bit */
101#define RX_OWEN_DMA (0x01 << 31)
102#define RX_OWEN_CPU (~(0x03 << 30))
103#define TX_OWEN_DMA (0x01 << 31)
104#define TX_OWEN_CPU (~(0x01 << 31))
105
106/* tx frame desc controller bit */
107#define MACTXINTEN 0x04
108#define CRCMODE 0x02
109#define PADDINGMODE 0x01
110
111/* fftcr controller bit */
112#define TXTHD (0x03 << 8)
113#define BLENGTH (0x01 << 20)
114
115/* global setting for driver */
116#define RX_DESC_SIZE 50
117#define TX_DESC_SIZE 10
118#define MAX_RBUFF_SZ 0x600
119#define MAX_TBUFF_SZ 0x600
120#define TX_TIMEOUT (HZ/2)
121#define DELAY 1000
122#define CAM0 0x0
123
124static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
125
/*
 * Receive buffer descriptor, laid out as the EMC DMA engine expects.
 * All addresses stored here are bus (physical) addresses.
 */
struct w90p910_rxbd {
	unsigned int sl;	/* ownership/status bits; low 16 bits = length */
	unsigned int buffer;	/* bus address of the packet data buffer */
	unsigned int reserved;
	unsigned int next;	/* bus address of the next descriptor in the ring */
};
132
/*
 * Transmit buffer descriptor. "mode" carries the ownership and control
 * flags (TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN).
 */
struct w90p910_txbd {
	unsigned int mode;	/* ownership + control flags */
	unsigned int buffer;	/* bus address of the frame data */
	unsigned int sl;	/* length on submit; status after completion */
	unsigned int next;	/* bus address of the next descriptor in the ring */
};
139
/* RX descriptor ring plus its packet buffers, one DMA-coherent chunk. */
struct recv_pdesc {
	struct w90p910_rxbd desclist[RX_DESC_SIZE];
	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
};
144
/* TX descriptor ring plus its frame buffers, one DMA-coherent chunk. */
struct tran_pdesc {
	struct w90p910_txbd desclist[TX_DESC_SIZE];
	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
};
149
/* Per-interface private state, stored in the net_device private area. */
struct w90p910_ether {
	struct recv_pdesc *rdesc;	/* RX ring + buffers (DMA-coherent) */
	struct tran_pdesc *tdesc;	/* TX ring + buffers (DMA-coherent) */
	dma_addr_t rdesc_phys;		/* bus address of rdesc */
	dma_addr_t tdesc_phys;		/* bus address of tdesc */
	struct net_device_stats stats;
	struct platform_device *pdev;
	struct resource *res;		/* MMIO register resource */
	struct sk_buff *skb;
	struct clk *clk;		/* MAC clock */
	struct clk *rmiiclk;		/* RMII interface clock */
	struct mii_if_info mii;		/* generic MII state (phy_id etc.) */
	struct timer_list check_timer;	/* 1 s periodic link poll */
	void __iomem *reg;		/* mapped register base */
	int rxirq;
	int txirq;
	unsigned int cur_tx;		/* next TX descriptor to fill */
	unsigned int cur_rx;		/* next RX descriptor to reap */
	unsigned int finish_tx;		/* next TX descriptor to reclaim */
	unsigned int rx_packets;
	unsigned int rx_bytes;
	unsigned int start_tx_ptr;	/* bus address of first TX descriptor */
	unsigned int start_rx_ptr;	/* bus address of first RX descriptor */
	unsigned int linkflag;		/* 1 while link is up and configured */
};
175
176static void update_linkspeed_register(struct net_device *dev,
177 unsigned int speed, unsigned int duplex)
178{
179 struct w90p910_ether *ether = netdev_priv(dev);
180 unsigned int val;
181
182 val = __raw_readl(ether->reg + REG_MCMDR);
183
184 if (speed == SPEED_100) {
185 /* 100 full/half duplex */
186 if (duplex == DUPLEX_FULL) {
187 val |= (MCMDR_OPMOD | MCMDR_FDUP);
188 } else {
189 val |= MCMDR_OPMOD;
190 val &= ~MCMDR_FDUP;
191 }
192 } else {
193 /* 10 full/half duplex */
194 if (duplex == DUPLEX_FULL) {
195 val |= MCMDR_FDUP;
196 val &= ~MCMDR_OPMOD;
197 } else {
198 val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
199 }
200 }
201
202 __raw_writel(val, ether->reg + REG_MCMDR);
203}
204
/*
 * Poll the PHY link state and, on a link-up transition, program the MAC
 * speed/duplex bits to match the (auto-)negotiated mode.
 */
static void update_linkspeed(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int bmsr, bmcr, lpa, speed, duplex;

	pdev = ether->pdev;

	if (!mii_link_ok(&ether->mii)) {
		/* Link lost: clear flag so the next link-up reprograms MCMDR. */
		ether->linkflag = 0x0;
		netif_carrier_off(dev);
		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
		return;
	}

	/* Link already up and configured; nothing to do. */
	if (ether->linkflag == 1)
		return;

	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		/* Autonegotiation enabled: wait until it completes. */
		if (!(bmsr & BMSR_ANEGCOMPLETE))
			return;

		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);

		/* Derive speed/duplex from the link partner's abilities. */
		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
			speed = SPEED_100;
		else
			speed = SPEED_10;

		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
			duplex = DUPLEX_FULL;
		else
			duplex = DUPLEX_HALF;

	} else {
		/* Forced mode: take speed/duplex straight from BMCR. */
		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	}

	update_linkspeed_register(dev, speed, duplex);

	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
			(duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
	ether->linkflag = 0x01;

	netif_carrier_on(dev);
}
255
256static void w90p910_check_link(unsigned long dev_id)
257{
258 struct net_device *dev = (struct net_device *) dev_id;
259 struct w90p910_ether *ether = netdev_priv(dev);
260
261 update_linkspeed(dev);
262 mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
263}
264
265static void w90p910_write_cam(struct net_device *dev,
266 unsigned int x, unsigned char *pval)
267{
268 struct w90p910_ether *ether = netdev_priv(dev);
269 unsigned int msw, lsw;
270
271 msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
272
273 lsw = (pval[4] << 24) | (pval[5] << 16);
274
275 __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
276 __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
277}
278
/*
 * Allocate the DMA-coherent TX/RX descriptor rings (with their packet
 * buffers) and link the descriptors into circular lists.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 *
 * NOTE(review): this is also reached from w90p910_reset_mac(); each call
 * re-allocates both rings without freeing the previous ones, so repeated
 * bus-error resets appear to leak DMA memory — confirm and fix separately.
 */
static int w90p910_init_desc(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *tdesc;
	struct w90p910_rxbd *rdesc;
	struct platform_device *pdev;
	unsigned int i;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	ether->tdesc = (struct tran_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					&ether->tdesc_phys, GFP_KERNEL);

	if (!ether->tdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
		return -ENOMEM;
	}

	ether->rdesc = (struct recv_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
					&ether->rdesc_phys, GFP_KERNEL);

	if (!ether->rdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					ether->tdesc, ether->tdesc_phys);
		return -ENOMEM;
	}

	for (i = 0; i < TX_DESC_SIZE; i++) {
		unsigned int offset;

		tdesc = &(ether->tdesc->desclist[i]);

		/* Last descriptor links back to the first: circular ring. */
		if (i == TX_DESC_SIZE - 1)
			offset = offsetof(struct tran_pdesc, desclist[0]);
		else
			offset = offsetof(struct tran_pdesc, desclist[i + 1]);

		tdesc->next = ether->tdesc_phys + offset;
		tdesc->buffer = ether->tdesc_phys +
			offsetof(struct tran_pdesc, tran_buf[i]);
		tdesc->sl = 0;
		tdesc->mode = 0;	/* CPU owns each TX descriptor initially */
	}

	ether->start_tx_ptr = ether->tdesc_phys;

	for (i = 0; i < RX_DESC_SIZE; i++) {
		unsigned int offset;

		rdesc = &(ether->rdesc->desclist[i]);

		if (i == RX_DESC_SIZE - 1)
			offset = offsetof(struct recv_pdesc, desclist[0]);
		else
			offset = offsetof(struct recv_pdesc, desclist[i + 1]);

		rdesc->next = ether->rdesc_phys + offset;
		rdesc->sl = RX_OWEN_DMA;	/* hand RX descriptors to DMA */
		rdesc->buffer = ether->rdesc_phys +
			offsetof(struct recv_pdesc, recv_buf[i]);
	}

	ether->start_rx_ptr = ether->rdesc_phys;

	return 0;
}
349
350static void w90p910_set_fifo_threshold(struct net_device *dev)
351{
352 struct w90p910_ether *ether = netdev_priv(dev);
353 unsigned int val;
354
355 val = TXTHD | BLENGTH;
356 __raw_writel(val, ether->reg + REG_FFTCR);
357}
358
359static void w90p910_return_default_idle(struct net_device *dev)
360{
361 struct w90p910_ether *ether = netdev_priv(dev);
362 unsigned int val;
363
364 val = __raw_readl(ether->reg + REG_MCMDR);
365 val |= SWR;
366 __raw_writel(val, ether->reg + REG_MCMDR);
367}
368
369static void w90p910_trigger_rx(struct net_device *dev)
370{
371 struct w90p910_ether *ether = netdev_priv(dev);
372
373 __raw_writel(ENSTART, ether->reg + REG_RSDR);
374}
375
376static void w90p910_trigger_tx(struct net_device *dev)
377{
378 struct w90p910_ether *ether = netdev_priv(dev);
379
380 __raw_writel(ENSTART, ether->reg + REG_TSDR);
381}
382
383static void w90p910_enable_mac_interrupt(struct net_device *dev)
384{
385 struct w90p910_ether *ether = netdev_priv(dev);
386 unsigned int val;
387
388 val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
389 val |= ENTXBERR | ENRXBERR | ENTXABT;
390
391 __raw_writel(val, ether->reg + REG_MIEN);
392}
393
394static void w90p910_get_and_clear_int(struct net_device *dev,
395 unsigned int *val)
396{
397 struct w90p910_ether *ether = netdev_priv(dev);
398
399 *val = __raw_readl(ether->reg + REG_MISTA);
400 __raw_writel(*val, ether->reg + REG_MISTA);
401}
402
403static void w90p910_set_global_maccmd(struct net_device *dev)
404{
405 struct w90p910_ether *ether = netdev_priv(dev);
406 unsigned int val;
407
408 val = __raw_readl(ether->reg + REG_MCMDR);
409 val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
410 __raw_writel(val, ether->reg + REG_MCMDR);
411}
412
413static void w90p910_enable_cam(struct net_device *dev)
414{
415 struct w90p910_ether *ether = netdev_priv(dev);
416 unsigned int val;
417
418 w90p910_write_cam(dev, CAM0, dev->dev_addr);
419
420 val = __raw_readl(ether->reg + REG_CAMEN);
421 val |= CAM0EN;
422 __raw_writel(val, ether->reg + REG_CAMEN);
423}
424
425static void w90p910_enable_cam_command(struct net_device *dev)
426{
427 struct w90p910_ether *ether = netdev_priv(dev);
428 unsigned int val;
429
430 val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
431 __raw_writel(val, ether->reg + REG_CAMCMR);
432}
433
434static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
435{
436 struct w90p910_ether *ether = netdev_priv(dev);
437 unsigned int val;
438
439 val = __raw_readl(ether->reg + REG_MCMDR);
440
441 if (enable)
442 val |= MCMDR_TXON;
443 else
444 val &= ~MCMDR_TXON;
445
446 __raw_writel(val, ether->reg + REG_MCMDR);
447}
448
449static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
450{
451 struct w90p910_ether *ether = netdev_priv(dev);
452 unsigned int val;
453
454 val = __raw_readl(ether->reg + REG_MCMDR);
455
456 if (enable)
457 val |= MCMDR_RXON;
458 else
459 val &= ~MCMDR_RXON;
460
461 __raw_writel(val, ether->reg + REG_MCMDR);
462}
463
464static void w90p910_set_curdest(struct net_device *dev)
465{
466 struct w90p910_ether *ether = netdev_priv(dev);
467
468 __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
469 __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
470}
471
472static void w90p910_reset_mac(struct net_device *dev)
473{
474 struct w90p910_ether *ether = netdev_priv(dev);
475
476 w90p910_enable_tx(dev, 0);
477 w90p910_enable_rx(dev, 0);
478 w90p910_set_fifo_threshold(dev);
479 w90p910_return_default_idle(dev);
480
481 if (!netif_queue_stopped(dev))
482 netif_stop_queue(dev);
483
484 w90p910_init_desc(dev);
485
486 dev->trans_start = jiffies; /* prevent tx timeout */
487 ether->cur_tx = 0x0;
488 ether->finish_tx = 0x0;
489 ether->cur_rx = 0x0;
490
491 w90p910_set_curdest(dev);
492 w90p910_enable_cam(dev);
493 w90p910_enable_cam_command(dev);
494 w90p910_enable_mac_interrupt(dev);
495 w90p910_enable_tx(dev, 1);
496 w90p910_enable_rx(dev, 1);
497 w90p910_trigger_tx(dev);
498 w90p910_trigger_rx(dev);
499
500 dev->trans_start = jiffies; /* prevent tx timeout */
501
502 if (netif_queue_stopped(dev))
503 netif_wake_queue(dev);
504}
505
506static void w90p910_mdio_write(struct net_device *dev,
507 int phy_id, int reg, int data)
508{
509 struct w90p910_ether *ether = netdev_priv(dev);
510 struct platform_device *pdev;
511 unsigned int val, i;
512
513 pdev = ether->pdev;
514
515 __raw_writel(data, ether->reg + REG_MIID);
516
517 val = (phy_id << 0x08) | reg;
518 val |= PHYBUSY | PHYWR | MDCCR_VAL;
519 __raw_writel(val, ether->reg + REG_MIIDA);
520
521 for (i = 0; i < DELAY; i++) {
522 if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
523 break;
524 }
525
526 if (i == DELAY)
527 dev_warn(&pdev->dev, "mdio write timed out\n");
528}
529
530static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
531{
532 struct w90p910_ether *ether = netdev_priv(dev);
533 struct platform_device *pdev;
534 unsigned int val, i, data;
535
536 pdev = ether->pdev;
537
538 val = (phy_id << 0x08) | reg;
539 val |= PHYBUSY | MDCCR_VAL;
540 __raw_writel(val, ether->reg + REG_MIIDA);
541
542 for (i = 0; i < DELAY; i++) {
543 if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
544 break;
545 }
546
547 if (i == DELAY) {
548 dev_warn(&pdev->dev, "mdio read timed out\n");
549 data = 0xffff;
550 } else {
551 data = __raw_readl(ether->reg + REG_MIID);
552 }
553
554 return data;
555}
556
557static int w90p910_set_mac_address(struct net_device *dev, void *addr)
558{
559 struct sockaddr *address = addr;
560
561 if (!is_valid_ether_addr(address->sa_data))
562 return -EADDRNOTAVAIL;
563
564 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
565 w90p910_write_cam(dev, CAM0, dev->dev_addr);
566
567 return 0;
568}
569
570static int w90p910_ether_close(struct net_device *dev)
571{
572 struct w90p910_ether *ether = netdev_priv(dev);
573 struct platform_device *pdev;
574
575 pdev = ether->pdev;
576
577 dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
578 ether->rdesc, ether->rdesc_phys);
579 dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
580 ether->tdesc, ether->tdesc_phys);
581
582 netif_stop_queue(dev);
583
584 del_timer_sync(&ether->check_timer);
585 clk_disable(ether->rmiiclk);
586 clk_disable(ether->clk);
587
588 free_irq(ether->txirq, dev);
589 free_irq(ether->rxirq, dev);
590
591 return 0;
592}
593
594static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
595{
596 struct w90p910_ether *ether;
597
598 ether = netdev_priv(dev);
599
600 return &ether->stats;
601}
602
/*
 * Copy one outgoing frame into the current TX descriptor's buffer and
 * hand that descriptor to the DMA engine.
 *
 * Always returns 0: the frame is queued unconditionally (oversized data
 * is truncated to 1514 bytes with an error message).
 */
static int w90p910_send_frame(struct net_device *dev,
			unsigned char *data, int length)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	unsigned char *buffer;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	txbd = &ether->tdesc->desclist[ether->cur_tx];
	buffer = ether->tdesc->tran_buf[ether->cur_tx];

	/* Clamp to the maximum frame the fixed ring buffer can hold. */
	if (length > 1514) {
		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
		length = 1514;
	}

	txbd->sl = length & 0xFFFF;

	memcpy(buffer, data, length);

	/* Hand the descriptor to the DMA engine; interrupt on completion. */
	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;

	w90p910_enable_tx(dev, 1);

	w90p910_trigger_tx(dev);

	/* Advance to the next descriptor in the circular ring. */
	if (++ether->cur_tx >= TX_DESC_SIZE)
		ether->cur_tx = 0;

	txbd = &ether->tdesc->desclist[ether->cur_tx];

	/* If the next descriptor is still DMA-owned, the ring is full. */
	if (txbd->mode & TX_OWEN_DMA)
		netif_stop_queue(dev);

	return 0;
}
642
643static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
644{
645 struct w90p910_ether *ether = netdev_priv(dev);
646
647 if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
648 ether->skb = skb;
649 dev_kfree_skb_irq(skb);
650 return 0;
651 }
652 return -EAGAIN;
653}
654
/*
 * TX interrupt: reclaim every descriptor the MAC has finished with (up
 * to the MAC's current descriptor pointer), account the frames, then
 * handle error/defer conditions reported in MISTA.
 */
static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	struct net_device *dev;
	unsigned int cur_entry, entry, status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	/* Read and acknowledge the interrupt status. */
	w90p910_get_and_clear_int(dev, &status);

	/* Bus address of the descriptor the MAC is working on now. */
	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);

	entry = ether->tdesc_phys +
		offsetof(struct tran_pdesc, desclist[ether->finish_tx]);

	while (entry != cur_entry) {
		txbd = &ether->tdesc->desclist[ether->finish_tx];

		if (++ether->finish_tx >= TX_DESC_SIZE)
			ether->finish_tx = 0;

		if (txbd->sl & TXDS_TXCP) {
			/* Frame transmitted successfully. */
			ether->stats.tx_packets++;
			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
		} else {
			ether->stats.tx_errors++;
		}

		txbd->sl = 0x0;
		txbd->mode = 0x0;	/* descriptor is free again */

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		entry = ether->tdesc_phys +
			offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
	}

	if (status & MISTA_EXDEF) {
		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
	} else if (status & MISTA_TXBERR) {
		/* Bus error: full MAC reset to recover. */
		dev_err(&pdev->dev, "emc bus error interrupt\n");
		w90p910_reset_mac(dev);
	} else if (status & MISTA_TDU) {
		/* TX descriptors all consumed: ring drained, wake the queue. */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}
709
710static void netdev_rx(struct net_device *dev)
711{
712 struct w90p910_ether *ether;
713 struct w90p910_rxbd *rxbd;
714 struct platform_device *pdev;
715 struct sk_buff *skb;
716 unsigned char *data;
717 unsigned int length, status, val, entry;
718
719 ether = netdev_priv(dev);
720 pdev = ether->pdev;
721
722 rxbd = &ether->rdesc->desclist[ether->cur_rx];
723
724 do {
725 val = __raw_readl(ether->reg + REG_CRXDSA);
726
727 entry = ether->rdesc_phys +
728 offsetof(struct recv_pdesc, desclist[ether->cur_rx]);
729
730 if (val == entry)
731 break;
732
733 status = rxbd->sl;
734 length = status & 0xFFFF;
735
736 if (status & RXDS_RXGD) {
737 data = ether->rdesc->recv_buf[ether->cur_rx];
738 skb = dev_alloc_skb(length+2);
739 if (!skb) {
740 dev_err(&pdev->dev, "get skb buffer error\n");
741 ether->stats.rx_dropped++;
742 return;
743 }
744
745 skb_reserve(skb, 2);
746 skb_put(skb, length);
747 skb_copy_to_linear_data(skb, data, length);
748 skb->protocol = eth_type_trans(skb, dev);
749 ether->stats.rx_packets++;
750 ether->stats.rx_bytes += length;
751 netif_rx(skb);
752 } else {
753 ether->stats.rx_errors++;
754
755 if (status & RXDS_RP) {
756 dev_err(&pdev->dev, "rx runt err\n");
757 ether->stats.rx_length_errors++;
758 } else if (status & RXDS_CRCE) {
759 dev_err(&pdev->dev, "rx crc err\n");
760 ether->stats.rx_crc_errors++;
761 } else if (status & RXDS_ALIE) {
762 dev_err(&pdev->dev, "rx aligment err\n");
763 ether->stats.rx_frame_errors++;
764 } else if (status & RXDS_PTLE) {
765 dev_err(&pdev->dev, "rx longer err\n");
766 ether->stats.rx_over_errors++;
767 }
768 }
769
770 rxbd->sl = RX_OWEN_DMA;
771 rxbd->reserved = 0x0;
772
773 if (++ether->cur_rx >= RX_DESC_SIZE)
774 ether->cur_rx = 0;
775
776 rxbd = &ether->rdesc->desclist[ether->cur_rx];
777
778 } while (1);
779}
780
781static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
782{
783 struct net_device *dev;
784 struct w90p910_ether *ether;
785 struct platform_device *pdev;
786 unsigned int status;
787
788 dev = dev_id;
789 ether = netdev_priv(dev);
790 pdev = ether->pdev;
791
792 w90p910_get_and_clear_int(dev, &status);
793
794 if (status & MISTA_RDU) {
795 netdev_rx(dev);
796 w90p910_trigger_rx(dev);
797
798 return IRQ_HANDLED;
799 } else if (status & MISTA_RXBERR) {
800 dev_err(&pdev->dev, "emc rx bus error\n");
801 w90p910_reset_mac(dev);
802 }
803
804 netdev_rx(dev);
805 return IRQ_HANDLED;
806}
807
808static int w90p910_ether_open(struct net_device *dev)
809{
810 struct w90p910_ether *ether;
811 struct platform_device *pdev;
812
813 ether = netdev_priv(dev);
814 pdev = ether->pdev;
815
816 w90p910_reset_mac(dev);
817 w90p910_set_fifo_threshold(dev);
818 w90p910_set_curdest(dev);
819 w90p910_enable_cam(dev);
820 w90p910_enable_cam_command(dev);
821 w90p910_enable_mac_interrupt(dev);
822 w90p910_set_global_maccmd(dev);
823 w90p910_enable_rx(dev, 1);
824
825 clk_enable(ether->rmiiclk);
826 clk_enable(ether->clk);
827
828 ether->rx_packets = 0x0;
829 ether->rx_bytes = 0x0;
830
831 if (request_irq(ether->txirq, w90p910_tx_interrupt,
832 0x0, pdev->name, dev)) {
833 dev_err(&pdev->dev, "register irq tx failed\n");
834 return -EAGAIN;
835 }
836
837 if (request_irq(ether->rxirq, w90p910_rx_interrupt,
838 0x0, pdev->name, dev)) {
839 dev_err(&pdev->dev, "register irq rx failed\n");
840 free_irq(ether->txirq, dev);
841 return -EAGAIN;
842 }
843
844 mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
845 netif_start_queue(dev);
846 w90p910_trigger_rx(dev);
847
848 dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
849
850 return 0;
851}
852
853static void w90p910_ether_set_multicast_list(struct net_device *dev)
854{
855 struct w90p910_ether *ether;
856 unsigned int rx_mode;
857
858 ether = netdev_priv(dev);
859
860 if (dev->flags & IFF_PROMISC)
861 rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
862 else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
863 rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
864 else
865 rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
866 __raw_writel(rx_mode, ether->reg + REG_CAMCMR);
867}
868
869static int w90p910_ether_ioctl(struct net_device *dev,
870 struct ifreq *ifr, int cmd)
871{
872 struct w90p910_ether *ether = netdev_priv(dev);
873 struct mii_ioctl_data *data = if_mii(ifr);
874
875 return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
876}
877
878static void w90p910_get_drvinfo(struct net_device *dev,
879 struct ethtool_drvinfo *info)
880{
881 strcpy(info->driver, DRV_MODULE_NAME);
882 strcpy(info->version, DRV_MODULE_VERSION);
883}
884
885static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
886{
887 struct w90p910_ether *ether = netdev_priv(dev);
888 return mii_ethtool_gset(&ether->mii, cmd);
889}
890
891static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
892{
893 struct w90p910_ether *ether = netdev_priv(dev);
894 return mii_ethtool_sset(&ether->mii, cmd);
895}
896
897static int w90p910_nway_reset(struct net_device *dev)
898{
899 struct w90p910_ether *ether = netdev_priv(dev);
900 return mii_nway_restart(&ether->mii);
901}
902
903static u32 w90p910_get_link(struct net_device *dev)
904{
905 struct w90p910_ether *ether = netdev_priv(dev);
906 return mii_link_ok(&ether->mii);
907}
908
/* ethtool operations: MII-backed get/set, autoneg restart, link state. */
static const struct ethtool_ops w90p910_ether_ethtool_ops = {
	.get_settings = w90p910_get_settings,
	.set_settings = w90p910_set_settings,
	.get_drvinfo = w90p910_get_drvinfo,
	.nway_reset = w90p910_nway_reset,
	.get_link = w90p910_get_link,
};
916
/* Net device operations wired up in w90p910_ether_setup(). */
static const struct net_device_ops w90p910_ether_netdev_ops = {
	.ndo_open = w90p910_ether_open,
	.ndo_stop = w90p910_ether_close,
	.ndo_start_xmit = w90p910_ether_start_xmit,
	.ndo_get_stats = w90p910_ether_stats,
	.ndo_set_multicast_list = w90p910_ether_set_multicast_list,
	.ndo_set_mac_address = w90p910_set_mac_address,
	.ndo_do_ioctl = w90p910_ether_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
928
929static void __init get_mac_address(struct net_device *dev)
930{
931 struct w90p910_ether *ether = netdev_priv(dev);
932 struct platform_device *pdev;
933 char addr[6];
934
935 pdev = ether->pdev;
936
937 addr[0] = 0x00;
938 addr[1] = 0x02;
939 addr[2] = 0xac;
940 addr[3] = 0x55;
941 addr[4] = 0x88;
942 addr[5] = 0xa8;
943
944 if (is_valid_ether_addr(addr))
945 memcpy(dev->dev_addr, &addr, 0x06);
946 else
947 dev_err(&pdev->dev, "invalid mac address\n");
948}
949
950static int w90p910_ether_setup(struct net_device *dev)
951{
952 struct w90p910_ether *ether = netdev_priv(dev);
953
954 ether_setup(dev);
955 dev->netdev_ops = &w90p910_ether_netdev_ops;
956 dev->ethtool_ops = &w90p910_ether_ethtool_ops;
957
958 dev->tx_queue_len = 16;
959 dev->dma = 0x0;
960 dev->watchdog_timeo = TX_TIMEOUT;
961
962 get_mac_address(dev);
963
964 ether->cur_tx = 0x0;
965 ether->cur_rx = 0x0;
966 ether->finish_tx = 0x0;
967 ether->linkflag = 0x0;
968 ether->mii.phy_id = 0x01;
969 ether->mii.phy_id_mask = 0x1f;
970 ether->mii.reg_num_mask = 0x1f;
971 ether->mii.dev = dev;
972 ether->mii.mdio_read = w90p910_mdio_read;
973 ether->mii.mdio_write = w90p910_mdio_write;
974
975 setup_timer(&ether->check_timer, w90p910_check_link,
976 (unsigned long)dev);
977
978 return 0;
979}
980
981static int __devinit w90p910_ether_probe(struct platform_device *pdev)
982{
983 struct w90p910_ether *ether;
984 struct net_device *dev;
985 int error;
986
987 dev = alloc_etherdev(sizeof(struct w90p910_ether));
988 if (!dev)
989 return -ENOMEM;
990
991 ether = netdev_priv(dev);
992
993 ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
994 if (ether->res == NULL) {
995 dev_err(&pdev->dev, "failed to get I/O memory\n");
996 error = -ENXIO;
997 goto failed_free;
998 }
999
1000 if (!request_mem_region(ether->res->start,
1001 resource_size(ether->res), pdev->name)) {
1002 dev_err(&pdev->dev, "failed to request I/O memory\n");
1003 error = -EBUSY;
1004 goto failed_free;
1005 }
1006
1007 ether->reg = ioremap(ether->res->start, resource_size(ether->res));
1008 if (ether->reg == NULL) {
1009 dev_err(&pdev->dev, "failed to remap I/O memory\n");
1010 error = -ENXIO;
1011 goto failed_free_mem;
1012 }
1013
1014 ether->txirq = platform_get_irq(pdev, 0);
1015 if (ether->txirq < 0) {
1016 dev_err(&pdev->dev, "failed to get ether tx irq\n");
1017 error = -ENXIO;
1018 goto failed_free_io;
1019 }
1020
1021 ether->rxirq = platform_get_irq(pdev, 1);
1022 if (ether->rxirq < 0) {
1023 dev_err(&pdev->dev, "failed to get ether rx irq\n");
1024 error = -ENXIO;
1025 goto failed_free_txirq;
1026 }
1027
1028 platform_set_drvdata(pdev, dev);
1029
1030 ether->clk = clk_get(&pdev->dev, NULL);
1031 if (IS_ERR(ether->clk)) {
1032 dev_err(&pdev->dev, "failed to get ether clock\n");
1033 error = PTR_ERR(ether->clk);
1034 goto failed_free_rxirq;
1035 }
1036
1037 ether->rmiiclk = clk_get(&pdev->dev, "RMII");
1038 if (IS_ERR(ether->rmiiclk)) {
1039 dev_err(&pdev->dev, "failed to get ether clock\n");
1040 error = PTR_ERR(ether->rmiiclk);
1041 goto failed_put_clk;
1042 }
1043
1044 ether->pdev = pdev;
1045
1046 w90p910_ether_setup(dev);
1047
1048 error = register_netdev(dev);
1049 if (error != 0) {
1050 dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n");
1051 error = -ENODEV;
1052 goto failed_put_rmiiclk;
1053 }
1054
1055 return 0;
1056failed_put_rmiiclk:
1057 clk_put(ether->rmiiclk);
1058failed_put_clk:
1059 clk_put(ether->clk);
1060failed_free_rxirq:
1061 free_irq(ether->rxirq, pdev);
1062 platform_set_drvdata(pdev, NULL);
1063failed_free_txirq:
1064 free_irq(ether->txirq, pdev);
1065failed_free_io:
1066 iounmap(ether->reg);
1067failed_free_mem:
1068 release_mem_region(ether->res->start, resource_size(ether->res));
1069failed_free:
1070 free_netdev(dev);
1071 return error;
1072}
1073
1074static int __devexit w90p910_ether_remove(struct platform_device *pdev)
1075{
1076 struct net_device *dev = platform_get_drvdata(pdev);
1077 struct w90p910_ether *ether = netdev_priv(dev);
1078
1079 unregister_netdev(dev);
1080
1081 clk_put(ether->rmiiclk);
1082 clk_put(ether->clk);
1083
1084 iounmap(ether->reg);
1085 release_mem_region(ether->res->start, resource_size(ether->res));
1086
1087 free_irq(ether->txirq, dev);
1088 free_irq(ether->rxirq, dev);
1089
1090 del_timer_sync(&ether->check_timer);
1091 platform_set_drvdata(pdev, NULL);
1092
1093 free_netdev(dev);
1094 return 0;
1095}
1096
/* Platform driver glue; matches devices registered as "nuc900-emc". */
static struct platform_driver w90p910_ether_driver = {
	.probe = w90p910_ether_probe,
	.remove = __devexit_p(w90p910_ether_remove),
	.driver = {
		.name = "nuc900-emc",
		.owner = THIS_MODULE,
	},
};
1105
/* Module entry point: register the platform driver. */
static int __init w90p910_ether_init(void)
{
	return platform_driver_register(&w90p910_ether_driver);
}
1110
/* Module exit point: unregister the platform driver. */
static void __exit w90p910_ether_exit(void)
{
	platform_driver_unregister(&w90p910_ether_driver);
}
1115
/* Module registration and metadata. */
module_init(w90p910_ether_init);
module_exit(w90p910_ether_exit);

MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-emc");
1123