authorJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-05-20 10:50:27 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-08-12 03:21:33 -0400
commit8862bf1ed60de49550109b7023a0a33eb7db8b3c (patch)
tree76e2c24e847a986c139061f5d522aa94ce97e083 /drivers/net/ethernet/sgi
parenta88394cfb58007cca945699545469017beb0d206 (diff)
ioc3-eth/meth: Move the SGI drivers
Move the SGI drivers into drivers/net/ethernet/sgi/ and make the
necessary Kconfig and Makefile changes.

CC: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/sgi')
-rw-r--r--  drivers/net/ethernet/sgi/Kconfig     |   34
-rw-r--r--  drivers/net/ethernet/sgi/Makefile    |    6
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c  | 1684
-rw-r--r--  drivers/net/ethernet/sgi/meth.c      |  855
-rw-r--r--  drivers/net/ethernet/sgi/meth.h      |  243
5 files changed, 2822 insertions, 0 deletions
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
new file mode 100644
index 000000000000..3098594ab274
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -0,0 +1,34 @@
1#
2# SGI device configuration
3#
4
5config NET_VENDOR_SGI
6 bool "SGI devices"
7 depends on (PCI && SGI_IP27) || SGI_IP32
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about SGI devices. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_SGI
19
20config SGI_IOC3_ETH
21 bool "SGI IOC3 Ethernet"
22 depends on PCI && SGI_IP27
23 select CRC32
24 select MII
25 ---help---
26 If you have a network (Ethernet) card of this type, say Y and read
27 the Ethernet-HOWTO, available from
28 <http://www.tldp.org/docs.html#howto>.
29
30config SGI_O2MACE_ETH
31 tristate "SGI O2 MACE Fast Ethernet support"
32 depends on SGI_IP32=y
33
34endif # NET_VENDOR_SGI
diff --git a/drivers/net/ethernet/sgi/Makefile b/drivers/net/ethernet/sgi/Makefile
new file mode 100644
index 000000000000..e5bedd271e29
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the SGI device drivers.
3#
4
5obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
6obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
new file mode 100644
index 000000000000..a234e4504522
--- /dev/null
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -0,0 +1,1684 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Driver for SGI's IOC3-based Ethernet cards, as found on PCI cards.
7 *
8 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
10 *
11 * References:
12 * o IOC3 ASIC specification 4.51, 1996-04-18
13 * o IEEE 802.3 specification, 2000 edition
14 * o DP83840A specification, National Semiconductor, March 1997
15 *
16 * To do:
17 *
18 * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
19 * o Handle allocation failures in ioc3_init_rings().
20 * o Use prefetching for large packets. What is a good lower limit for
21 * prefetching?
22 * o We're probably allocating a bit too much memory.
23 * o Use hardware checksums.
24 * o Convert to using an IOC3 meta driver.
25 * o Which PHYs might possibly be attached to the IOC3 in real life,
26 * which workarounds are required for them? Do we ever have Lucent's?
27 * o For the 2.5 branch kill the mii-tool ioctls.
28 */
29
30#define IOC3_NAME "ioc3-eth"
31#define IOC3_VERSION "2.6.3-4"
32
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/kernel.h>
36#include <linux/mm.h>
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/crc32.h>
41#include <linux/mii.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#include <linux/udp.h>
46#include <linux/dma-mapping.h>
47#include <linux/gfp.h>
48
49#ifdef CONFIG_SERIAL_8250
50#include <linux/serial_core.h>
51#include <linux/serial_8250.h>
52#include <linux/serial_reg.h>
53#endif
54
55#include <linux/netdevice.h>
56#include <linux/etherdevice.h>
57#include <linux/ethtool.h>
58#include <linux/skbuff.h>
59#include <net/ip.h>
60
61#include <asm/byteorder.h>
62#include <asm/io.h>
63#include <asm/pgtable.h>
64#include <asm/uaccess.h>
65#include <asm/sn/types.h>
66#include <asm/sn/ioc3.h>
67#include <asm/pci/bridge.h>
68
69/*
70 * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
71 * value must be a power of two.
72 */
73#define RX_BUFFS 64
74
75#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
76#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
77
78/* Private per NIC data of the driver. */
79struct ioc3_private {
80 struct ioc3 *regs;
81 unsigned long *rxr; /* pointer to receiver ring */
82 struct ioc3_etxd *txr;
83 struct sk_buff *rx_skbs[512];
84 struct sk_buff *tx_skbs[128];
85 int rx_ci; /* RX consumer index */
86 int rx_pi; /* RX producer index */
87 int tx_ci; /* TX consumer index */
88 int tx_pi; /* TX producer index */
89 int txqlen;
90 u32 emcr, ehar_h, ehar_l;
91 spinlock_t ioc3_lock;
92 struct mii_if_info mii;
93
94 struct pci_dev *pdev;
95
96 /* Members used by autonegotiation */
97 struct timer_list ioc3_timer;
98};
99
100static inline struct net_device *priv_netdev(struct ioc3_private *dev)
101{
102 return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
103}
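A quick sketch of the arithmetic above (an assumption about how alloc_etherdev() of this era lays out memory, not something this file states): the private area follows struct net_device, padded to a 32-byte boundary, so stepping back by the aligned size recovers the net_device pointer.

/* Illustrative layout only:
 *
 *   [ struct net_device | pad to 32 bytes | struct ioc3_private ]
 *   ^ dev from alloc_etherdev()           ^ netdev_priv(dev)
 *
 * hence priv - ((sizeof(struct net_device) + 31) & ~31) == dev.
 */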
104
105static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
106static void ioc3_set_multicast_list(struct net_device *dev);
107static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
108static void ioc3_timeout(struct net_device *dev);
109static inline unsigned int ioc3_hash(const unsigned char *addr);
110static inline void ioc3_stop(struct ioc3_private *ip);
111static void ioc3_init(struct net_device *dev);
112
113static const char ioc3_str[] = "IOC3 Ethernet";
114static const struct ethtool_ops ioc3_ethtool_ops;
115
116/* We use this to acquire receive skb's that we can DMA directly into. */
117
118#define IOC3_CACHELINE 128UL
119
120static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
121{
122 return (~addr + 1) & (IOC3_CACHELINE - 1UL);
123}
124
125static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
126 unsigned int gfp_mask)
127{
128 struct sk_buff *skb;
129
130 skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
131 if (likely(skb)) {
132 int offset = aligned_rx_skb_addr((unsigned long) skb->data);
133 if (offset)
134 skb_reserve(skb, offset);
135 }
136
137 return skb;
138}
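A worked example of the alignment arithmetic in aligned_rx_skb_addr() (illustrative values only): (~addr + 1) is -addr in two's complement, so masking with IOC3_CACHELINE - 1 gives the distance to the next 128-byte boundary.

/* Illustrative only, assuming IOC3_CACHELINE == 128:
 *   addr = 0x1008: (~0x1008 + 1) & 0x7f = 0x78; 0x1008 + 0x78 = 0x1080.
 *   addr = 0x1080: result is 0, the address is already aligned.
 */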
139
140static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
141{
142#ifdef CONFIG_SGI_IP27
143 vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
144
145 return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
146 ((unsigned long)ptr & TO_PHYS_MASK);
147#else
148 return virt_to_bus(ptr);
149#endif
150}
151
152/* BEWARE: The IOC3 documentation gives the size of rx buffers as
153 1644 while it's actually 1664. This one was nasty to track down ... */
154#define RX_OFFSET 10
155#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
156
157/* DMA barrier to separate cached and uncached accesses. */
158#define BARRIER() \
159 __asm__("sync" ::: "memory")
160
161
162#define IOC3_SIZE 0x100000
163
164/*
165 * IOC3 is a big endian device
166 *
167 * Unorthodox, but it makes the users of these macros more readable - the
168 * pointer to the IOC3's memory-mapped registers is expected to be in scope
169 * as struct ioc3 *ioc3.
170 */
171#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
172#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
173#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
174#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
175#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
176#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
177#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
178#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
179#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
180#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
181#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
182#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
183#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
184#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
185#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
186#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
187#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
188#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
189#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
190#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
191#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
192#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
193#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
194#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
195#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
196#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
197#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
198#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
199#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
200#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
201#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
202#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
203#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
204#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
205#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
206#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
207#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
208#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
209#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
210#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
211#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
212#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
213#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
214#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
215#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
216#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
217#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
218#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
219#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
220#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
221#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
222#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
223#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
224
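A minimal usage sketch (hypothetical helper, not part of the driver) showing the convention the accessors rely on - a struct ioc3 *ioc3 pointer in scope - and a typical read-modify-write with the byte swapping handled by the macros:

/* Hypothetical helper, for illustration only. */
static inline void ioc3_eier_mask(struct ioc3 *ioc3, u32 mask)
{
	ioc3_w_eier(ioc3_r_eier() & ~mask);	/* swaps done by the macros */
}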
225static inline u32 mcr_pack(u32 pulse, u32 sample)
226{
227 return (pulse << 10) | (sample << 2);
228}
229
230static int nic_wait(struct ioc3 *ioc3)
231{
232 u32 mcr;
233
234 do {
235 mcr = ioc3_r_mcr();
236 } while (!(mcr & 2));
237
238 return mcr & 1;
239}
240
241static int nic_reset(struct ioc3 *ioc3)
242{
243 int presence;
244
245 ioc3_w_mcr(mcr_pack(500, 65));
246 presence = nic_wait(ioc3);
247
248 ioc3_w_mcr(mcr_pack(0, 500));
249 nic_wait(ioc3);
250
251 return presence;
252}
253
254static inline int nic_read_bit(struct ioc3 *ioc3)
255{
256 int result;
257
258 ioc3_w_mcr(mcr_pack(6, 13));
259 result = nic_wait(ioc3);
260 ioc3_w_mcr(mcr_pack(0, 100));
261 nic_wait(ioc3);
262
263 return result;
264}
265
266static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
267{
268 if (bit)
269 ioc3_w_mcr(mcr_pack(6, 110));
270 else
271 ioc3_w_mcr(mcr_pack(80, 30));
272
273 nic_wait(ioc3);
274}
275
276/*
277 * Read a byte from an iButton device
278 */
279static u32 nic_read_byte(struct ioc3 *ioc3)
280{
281 u32 result = 0;
282 int i;
283
284 for (i = 0; i < 8; i++)
285 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
286
287 return result;
288}
289
290/*
291 * Write a byte to an iButton device
292 */
293static void nic_write_byte(struct ioc3 *ioc3, int byte)
294{
295 int i, bit;
296
297 for (i = 8; i; i--) {
298 bit = byte & 1;
299 byte >>= 1;
300
301 nic_write_bit(ioc3, bit);
302 }
303}
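A sketch of how these bit/byte primitives compose (an assumption based on the standard Dallas one-wire protocol; the driver itself only issues Search ROM (0xf0) and Match ROM (0x55) below):

/* Hypothetical example: fetch the 8-byte ROM ID with Read ROM (0x33).
 * Illustrative only - nic_init() enumerates devices via Search ROM instead. */
static void nic_read_rom_id(struct ioc3 *ioc3, u8 id[8])
{
	int i;

	nic_reset(ioc3);
	nic_write_byte(ioc3, 0x33);		/* Read ROM command */
	for (i = 0; i < 8; i++)
		id[i] = nic_read_byte(ioc3);	/* LSB-first, like the driver */
}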
304
305static u64 nic_find(struct ioc3 *ioc3, int *last)
306{
307 int a, b, index, disc;
308 u64 address = 0;
309
310 nic_reset(ioc3);
311 /* Search ROM. */
312 nic_write_byte(ioc3, 0xf0);
313
314 /* Algorithm from ``Book of iButton Standards''. */
315 for (index = 0, disc = 0; index < 64; index++) {
316 a = nic_read_bit(ioc3);
317 b = nic_read_bit(ioc3);
318
319 if (a && b) {
320 printk("NIC search failed (not fatal).\n");
321 *last = 0;
322 return 0;
323 }
324
325 if (!a && !b) {
326 if (index == *last) {
327 address |= 1UL << index;
328 } else if (index > *last) {
329 address &= ~(1UL << index);
330 disc = index;
331 } else if ((address & (1UL << index)) == 0)
332 disc = index;
333 nic_write_bit(ioc3, address & (1UL << index));
334 continue;
335 } else {
336 if (a)
337 address |= 1UL << index;
338 else
339 address &= ~(1UL << index);
340 nic_write_bit(ioc3, a);
341 continue;
342 }
343 }
344
345 *last = disc;
346
347 return address;
348}
349
350static int nic_init(struct ioc3 *ioc3)
351{
352 const char *unknown = "unknown";
353 const char *type = unknown;
354 u8 crc;
355 u8 serial[6];
356 int save = 0, i;
357
358 while (1) {
359 u64 reg;
360 reg = nic_find(ioc3, &save);
361
362 switch (reg & 0xff) {
363 case 0x91:
364 type = "DS1981U";
365 break;
366 default:
367 if (save == 0) {
368 /* Let the caller try again. */
369 return -1;
370 }
371 continue;
372 }
373
374 nic_reset(ioc3);
375
376 /* Match ROM. */
377 nic_write_byte(ioc3, 0x55);
378 for (i = 0; i < 8; i++)
379 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
380
381 reg >>= 8; /* Shift out type. */
382 for (i = 0; i < 6; i++) {
383 serial[i] = reg & 0xff;
384 reg >>= 8;
385 }
386 crc = reg & 0xff;
387 break;
388 }
389
390 printk("Found %s NIC", type);
391 if (type != unknown)
392 printk (" registration number %pM, CRC %02x", serial, crc);
393 printk(".\n");
394
395 return 0;
396}
397
398/*
399 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
400 * SN0 / SN00 nodeboards and PCI cards.
401 */
402static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
403{
404 struct ioc3 *ioc3 = ip->regs;
405 u8 nic[14];
406 int tries = 2; /* There may be some problem with the battery? */
407 int i;
408
409 ioc3_w_gpcr_s(1 << 21);
410
411 while (tries--) {
412 if (!nic_init(ioc3))
413 break;
414 udelay(500);
415 }
416
417 if (tries < 0) {
418 printk("Failed to read MAC address\n");
419 return;
420 }
421
422 /* Read Memory. */
423 nic_write_byte(ioc3, 0xf0);
424 nic_write_byte(ioc3, 0x00);
425 nic_write_byte(ioc3, 0x00);
426
427 for (i = 13; i >= 0; i--)
428 nic[i] = nic_read_byte(ioc3);
429
430 for (i = 2; i < 8; i++)
431 priv_netdev(ip)->dev_addr[i - 2] = nic[i];
432}
433
434/*
435 * Ok, this is hosed by design. It's necessary to know what machine the
436 * NIC is in, in order to know how to read the NIC address. We also have
437 * to know if it's a PCI card or a NIC on the node board ...
438 */
439static void ioc3_get_eaddr(struct ioc3_private *ip)
440{
441 ioc3_get_eaddr_nic(ip);
442
443 printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
444}
445
446static void __ioc3_set_mac_address(struct net_device *dev)
447{
448 struct ioc3_private *ip = netdev_priv(dev);
449 struct ioc3 *ioc3 = ip->regs;
450
451 ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
452 ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
453 (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
454}
455
456static int ioc3_set_mac_address(struct net_device *dev, void *addr)
457{
458 struct ioc3_private *ip = netdev_priv(dev);
459 struct sockaddr *sa = addr;
460
461 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
462
463 spin_lock_irq(&ip->ioc3_lock);
464 __ioc3_set_mac_address(dev);
465 spin_unlock_irq(&ip->ioc3_lock);
466
467 return 0;
468}
469
470/*
471 * Caller must hold the ioc3_lock even for MII readers. This lock is
472 * also used to protect the transmitter side, but contention there is low.
473 */
474static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
475{
476 struct ioc3_private *ip = netdev_priv(dev);
477 struct ioc3 *ioc3 = ip->regs;
478
479 while (ioc3_r_micr() & MICR_BUSY);
480 ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
481 while (ioc3_r_micr() & MICR_BUSY);
482
483 return ioc3_r_midr_r() & MIDR_DATA_MASK;
484}
485
486static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
487{
488 struct ioc3_private *ip = netdev_priv(dev);
489 struct ioc3 *ioc3 = ip->regs;
490
491 while (ioc3_r_micr() & MICR_BUSY);
492 ioc3_w_midr_w(data);
493 ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
494 while (ioc3_r_micr() & MICR_BUSY);
495}
496
497static int ioc3_mii_init(struct ioc3_private *ip);
498
499static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
500{
501 struct ioc3_private *ip = netdev_priv(dev);
502 struct ioc3 *ioc3 = ip->regs;
503
504 dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
505 return &dev->stats;
506}
507
508static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
509{
510 struct ethhdr *eh = eth_hdr(skb);
511 uint32_t csum, ehsum;
512 unsigned int proto;
513 struct iphdr *ih;
514 uint16_t *ew;
515 unsigned char *cp;
516
517 /*
518 * Did hardware handle the checksum at all? The cases we can handle
519 * are:
520 *
521 * - TCP and UDP checksums of IPv4 only.
522 * - IPv6 would be doable but we keep that for later ...
523 * - Only unfragmented packets. Did somebody already tell you
524 * fragmentation is evil?
525 * - don't care about packet size. Worst case when processing a
526 * malformed packet we'll try to access the packet at ip header +
527 * 64 bytes which is still inside the skb. Even in the unlikely
528 * case where the checksum is right the higher layers will still
529 * drop the packet as appropriate.
530 */
531 if (eh->h_proto != htons(ETH_P_IP))
532 return;
533
534 ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
535 if (ip_is_fragment(ih))
536 return;
537
538 proto = ih->protocol;
539 if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
540 return;
541
542 /* Same as tx - compute csum of pseudo header */
543 csum = hwsum +
544 (ih->tot_len - (ih->ihl << 2)) +
545 htons((uint16_t)ih->protocol) +
546 (ih->saddr >> 16) + (ih->saddr & 0xffff) +
547 (ih->daddr >> 16) + (ih->daddr & 0xffff);
548
549 /* Sum up ethernet dest addr, src addr and protocol */
550 ew = (uint16_t *) eh;
551 ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
552
553 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
554 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
555
556 csum += 0xffff ^ ehsum;
557
558 /* In the next step we also subtract the 1's complement
559 checksum of the trailing ethernet CRC. */
560 cp = (char *)eh + len; /* points at trailing CRC */
561 if (len & 1) {
562 csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
563 csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
564 } else {
565 csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
566 csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
567 }
568
569 csum = (csum & 0xffff) + (csum >> 16);
570 csum = (csum & 0xffff) + (csum >> 16);
571
572 if (csum == 0xffff)
573 skb->ip_summed = CHECKSUM_UNNECESSARY;
574}
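A worked example of the folding steps above (illustrative numbers): carries out of bit 16 are folded back into the low 16 bits, and two folds always suffice because the first fold can carry at most 1.

/* Illustrative only:
 *   sum = 0x3fffe: fold once  -> 0xfffe + 0x3 = 0x10001
 *                  fold twice -> 0x0001 + 0x1 = 0x0002
 * A packet whose checksum verifies folds to exactly 0xffff, which is
 * what the CHECKSUM_UNNECESSARY test at the end checks for.
 */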
575
576static inline void ioc3_rx(struct net_device *dev)
577{
578 struct ioc3_private *ip = netdev_priv(dev);
579 struct sk_buff *skb, *new_skb;
580 struct ioc3 *ioc3 = ip->regs;
581 int rx_entry, n_entry, len;
582 struct ioc3_erxbuf *rxb;
583 unsigned long *rxr;
584 u32 w0, err;
585
586 rxr = (unsigned long *) ip->rxr; /* Ring base */
587 rx_entry = ip->rx_ci; /* RX consume index */
588 n_entry = ip->rx_pi;
589
590 skb = ip->rx_skbs[rx_entry];
591 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
592 w0 = be32_to_cpu(rxb->w0);
593
594 while (w0 & ERXBUF_V) {
595 err = be32_to_cpu(rxb->err); /* It's valid ... */
596 if (err & ERXBUF_GOODPKT) {
597 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
598 skb_trim(skb, len);
599 skb->protocol = eth_type_trans(skb, dev);
600
601 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
602 if (!new_skb) {
603 /* Ouch, drop the packet and just recycle the old skb
604 to keep the ring filled. */
605 dev->stats.rx_dropped++;
606 new_skb = skb;
607 goto next;
608 }
609
610 if (likely(dev->features & NETIF_F_RXCSUM))
611 ioc3_tcpudp_checksum(skb,
612 w0 & ERXBUF_IPCKSUM_MASK, len);
613
614 netif_rx(skb);
615
616 ip->rx_skbs[rx_entry] = NULL; /* Poison */
617
618 /* Because we reserve afterwards. */
619 skb_put(new_skb, (1664 + RX_OFFSET));
620 rxb = (struct ioc3_erxbuf *) new_skb->data;
621 skb_reserve(new_skb, RX_OFFSET);
622
623 dev->stats.rx_packets++; /* Statistics */
624 dev->stats.rx_bytes += len;
625 } else {
626 /* The frame is invalid and the skb never
627 reached the network layer so we can just
628 recycle it. */
629 new_skb = skb;
630 dev->stats.rx_errors++;
631 }
632 if (err & ERXBUF_CRCERR) /* Statistics */
633 dev->stats.rx_crc_errors++;
634 if (err & ERXBUF_FRAMERR)
635 dev->stats.rx_frame_errors++;
636next:
637 ip->rx_skbs[n_entry] = new_skb;
638 rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
639 rxb->w0 = 0; /* Clear valid flag */
640 n_entry = (n_entry + 1) & 511; /* Update erpir */
641
642 /* Now go on to the next ring entry. */
643 rx_entry = (rx_entry + 1) & 511;
644 skb = ip->rx_skbs[rx_entry];
645 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
646 w0 = be32_to_cpu(rxb->w0);
647 }
648 ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
649 ip->rx_pi = n_entry;
650 ip->rx_ci = rx_entry;
651}
652
653static inline void ioc3_tx(struct net_device *dev)
654{
655 struct ioc3_private *ip = netdev_priv(dev);
656 unsigned long packets, bytes;
657 struct ioc3 *ioc3 = ip->regs;
658 int tx_entry, o_entry;
659 struct sk_buff *skb;
660 u32 etcir;
661
662 spin_lock(&ip->ioc3_lock);
663 etcir = ioc3_r_etcir();
664
665 tx_entry = (etcir >> 7) & 127;
666 o_entry = ip->tx_ci;
667 packets = 0;
668 bytes = 0;
669
670 while (o_entry != tx_entry) {
671 packets++;
672 skb = ip->tx_skbs[o_entry];
673 bytes += skb->len;
674 dev_kfree_skb_irq(skb);
675 ip->tx_skbs[o_entry] = NULL;
676
677 o_entry = (o_entry + 1) & 127; /* Next */
678
679 etcir = ioc3_r_etcir(); /* More pkts sent? */
680 tx_entry = (etcir >> 7) & 127;
681 }
682
683 dev->stats.tx_packets += packets;
684 dev->stats.tx_bytes += bytes;
685 ip->txqlen -= packets;
686
687 if (ip->txqlen < 128)
688 netif_wake_queue(dev);
689
690 ip->tx_ci = o_entry;
691 spin_unlock(&ip->ioc3_lock);
692}
693
694/*
695 * Deal with fatal IOC3 errors. This condition might be caused by hardware
696 * or software problems, so we should try to recover
697 * more gracefully if this ever happens. In theory we might be flooded
698 * with such error interrupts if something really goes wrong, so we might
699 * also consider taking the interface down.
700 */
701static void ioc3_error(struct net_device *dev, u32 eisr)
702{
703 struct ioc3_private *ip = netdev_priv(dev);
704 unsigned char *iface = dev->name;
705
706 spin_lock(&ip->ioc3_lock);
707
708 if (eisr & EISR_RXOFLO)
709 printk(KERN_ERR "%s: RX overflow.\n", iface);
710 if (eisr & EISR_RXBUFOFLO)
711 printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
712 if (eisr & EISR_RXMEMERR)
713 printk(KERN_ERR "%s: RX PCI error.\n", iface);
714 if (eisr & EISR_RXPARERR)
715 printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
716 if (eisr & EISR_TXBUFUFLO)
717 printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
718 if (eisr & EISR_TXMEMERR)
719 printk(KERN_ERR "%s: TX PCI error.\n", iface);
720
721 ioc3_stop(ip);
722 ioc3_init(dev);
723 ioc3_mii_init(ip);
724
725 netif_wake_queue(dev);
726
727 spin_unlock(&ip->ioc3_lock);
728}
729
730/* The interrupt handler does all of the Rx thread work and cleans up
731 after the Tx thread. */
732static irqreturn_t ioc3_interrupt(int irq, void *_dev)
733{
734 struct net_device *dev = (struct net_device *)_dev;
735 struct ioc3_private *ip = netdev_priv(dev);
736 struct ioc3 *ioc3 = ip->regs;
737 const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
738 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
739 EISR_TXEXPLICIT | EISR_TXMEMERR;
740 u32 eisr;
741
742 eisr = ioc3_r_eisr() & enabled;
743
744 ioc3_w_eisr(eisr);
745 (void) ioc3_r_eisr(); /* Flush */
746
747 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
748 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
749 ioc3_error(dev, eisr);
750 if (eisr & EISR_RXTIMERINT)
751 ioc3_rx(dev);
752 if (eisr & EISR_TXEXPLICIT)
753 ioc3_tx(dev);
754
755 return IRQ_HANDLED;
756}
757
758static inline void ioc3_setup_duplex(struct ioc3_private *ip)
759{
760 struct ioc3 *ioc3 = ip->regs;
761
762 if (ip->mii.full_duplex) {
763 ioc3_w_etcsr(ETCSR_FD);
764 ip->emcr |= EMCR_DUPLEX;
765 } else {
766 ioc3_w_etcsr(ETCSR_HD);
767 ip->emcr &= ~EMCR_DUPLEX;
768 }
769 ioc3_w_emcr(ip->emcr);
770}
771
772static void ioc3_timer(unsigned long data)
773{
774 struct ioc3_private *ip = (struct ioc3_private *) data;
775
776 /* Print the link status if it has changed */
777 mii_check_media(&ip->mii, 1, 0);
778 ioc3_setup_duplex(ip);
779
780 ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
781 add_timer(&ip->ioc3_timer);
782}
783
784/*
785 * Try to find a PHY. There is no apparent relation between the MII addresses
786 * in the SGI documentation and what we find in reality, so we simply probe
787 * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
788 * onboard IOC3s has the special oddity that probing doesn't seem to find it
789 * yet the interface seems to work fine, so for now, if probing fails, we
790 * simply default to PHY 31 instead of bailing out.
791 */
792static int ioc3_mii_init(struct ioc3_private *ip)
793{
794 struct net_device *dev = priv_netdev(ip);
795 int i, found = 0, res = 0;
796 int ioc3_phy_workaround = 1;
797 u16 word;
798
799 for (i = 0; i < 32; i++) {
800 word = ioc3_mdio_read(dev, i, MII_PHYSID1);
801
802 if (word != 0xffff && word != 0x0000) {
803 found = 1;
804 break; /* Found a PHY */
805 }
806 }
807
808 if (!found) {
809 if (ioc3_phy_workaround)
810 i = 31;
811 else {
812 ip->mii.phy_id = -1;
813 res = -ENODEV;
814 goto out;
815 }
816 }
817
818 ip->mii.phy_id = i;
819
820out:
821 return res;
822}
823
824static void ioc3_mii_start(struct ioc3_private *ip)
825{
826 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
827 ip->ioc3_timer.data = (unsigned long) ip;
828 ip->ioc3_timer.function = ioc3_timer;
829 add_timer(&ip->ioc3_timer);
830}
831
832static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
833{
834 struct sk_buff *skb;
835 int i;
836
837 for (i = ip->rx_ci; i & 15; i++) {
838 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
839 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
840 }
841 ip->rx_pi &= 511;
842 ip->rx_ci &= 511;
843
844 for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
845 struct ioc3_erxbuf *rxb;
846 skb = ip->rx_skbs[i];
847 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
848 rxb->w0 = 0;
849 }
850}
851
852static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
853{
854 struct sk_buff *skb;
855 int i;
856
857 for (i=0; i < 128; i++) {
858 skb = ip->tx_skbs[i];
859 if (skb) {
860 ip->tx_skbs[i] = NULL;
861 dev_kfree_skb_any(skb);
862 }
863 ip->txr[i].cmd = 0;
864 }
865 ip->tx_pi = 0;
866 ip->tx_ci = 0;
867}
868
869static void ioc3_free_rings(struct ioc3_private *ip)
870{
871 struct sk_buff *skb;
872 int rx_entry, n_entry;
873
874 if (ip->txr) {
875 ioc3_clean_tx_ring(ip);
876 free_pages((unsigned long)ip->txr, 2);
877 ip->txr = NULL;
878 }
879
880 if (ip->rxr) {
881 n_entry = ip->rx_ci;
882 rx_entry = ip->rx_pi;
883
884 while (n_entry != rx_entry) {
885 skb = ip->rx_skbs[n_entry];
886 if (skb)
887 dev_kfree_skb_any(skb);
888
889 n_entry = (n_entry + 1) & 511;
890 }
891 free_page((unsigned long)ip->rxr);
892 ip->rxr = NULL;
893 }
894}
895
896static void ioc3_alloc_rings(struct net_device *dev)
897{
898 struct ioc3_private *ip = netdev_priv(dev);
899 struct ioc3_erxbuf *rxb;
900 unsigned long *rxr;
901 int i;
902
903 if (ip->rxr == NULL) {
904 /* Allocate and initialize rx ring. 4kb = 512 entries */
905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
906 rxr = (unsigned long *) ip->rxr;
907 if (!rxr)
908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
909
910 /* Now the rx buffers. The RX ring may be larger but
911 we only allocate RX_BUFFS (64) buffers for now. Need to tune
912 this for performance and memory later. */
913 for (i = 0; i < RX_BUFFS; i++) {
914 struct sk_buff *skb;
915
916 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
917 if (!skb) {
918 show_free_areas(0);
919 continue;
920 }
921
922 ip->rx_skbs[i] = skb;
923
924 /* Because we reserve afterwards. */
925 skb_put(skb, (1664 + RX_OFFSET));
926 rxb = (struct ioc3_erxbuf *) skb->data;
927 rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
928 skb_reserve(skb, RX_OFFSET);
929 }
930 ip->rx_ci = 0;
931 ip->rx_pi = RX_BUFFS;
932 }
933
934 if (ip->txr == NULL) {
935 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
936 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
937 if (!ip->txr)
938 printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
939 ip->tx_pi = 0;
940 ip->tx_ci = 0;
941 }
942}
943
944static void ioc3_init_rings(struct net_device *dev)
945{
946 struct ioc3_private *ip = netdev_priv(dev);
947 struct ioc3 *ioc3 = ip->regs;
948 unsigned long ring;
949
950 ioc3_free_rings(ip);
951 ioc3_alloc_rings(dev);
952
953 ioc3_clean_rx_ring(ip);
954 ioc3_clean_tx_ring(ip);
955
956 /* Now the rx ring base, consume & produce registers. */
957 ring = ioc3_map(ip->rxr, 0);
958 ioc3_w_erbr_h(ring >> 32);
959 ioc3_w_erbr_l(ring & 0xffffffff);
960 ioc3_w_ercir(ip->rx_ci << 3);
961 ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
962
963 ring = ioc3_map(ip->txr, 0);
964
965 ip->txqlen = 0; /* nothing queued */
966
967 /* Now the tx ring base, consume & produce registers. */
968 ioc3_w_etbr_h(ring >> 32);
969 ioc3_w_etbr_l(ring & 0xffffffff);
970 ioc3_w_etpir(ip->tx_pi << 7);
971 ioc3_w_etcir(ip->tx_ci << 7);
972 (void) ioc3_r_etcir(); /* Flush */
973}
974
975static inline void ioc3_ssram_disc(struct ioc3_private *ip)
976{
977 struct ioc3 *ioc3 = ip->regs;
978 volatile u32 *ssram0 = &ioc3->ssram[0x0000];
979 volatile u32 *ssram1 = &ioc3->ssram[0x4000];
980 unsigned int pattern = 0x5555;
981
982 /* Assume the larger size SSRAM and enable parity checking */
983 ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
984
985 *ssram0 = pattern;
986 *ssram1 = ~pattern & IOC3_SSRAM_DM;
987
988 if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
989 (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
990 /* set ssram size to 64 KB */
991 ip->emcr = EMCR_RAMPAR;
992 ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
993 } else
994 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
995}
996
997static void ioc3_init(struct net_device *dev)
998{
999 struct ioc3_private *ip = netdev_priv(dev);
1000 struct ioc3 *ioc3 = ip->regs;
1001
1002 del_timer_sync(&ip->ioc3_timer); /* Kill if running */
1003
1004 ioc3_w_emcr(EMCR_RST); /* Reset */
1005 (void) ioc3_r_emcr(); /* Flush WB */
1006 udelay(4); /* Give it time ... */
1007 ioc3_w_emcr(0);
1008 (void) ioc3_r_emcr();
1009
1010 /* Misc registers */
1011#ifdef CONFIG_SGI_IP27
1012 ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
1013#else
1014 ioc3_w_erbar(0); /* Let PCI API get it right */
1015#endif
1016 (void) ioc3_r_etcdc(); /* Clear on read */
1017 ioc3_w_ercsr(15); /* RX low watermark */
1018 ioc3_w_ertr(0); /* Interrupt immediately */
1019 __ioc3_set_mac_address(dev);
1020 ioc3_w_ehar_h(ip->ehar_h);
1021 ioc3_w_ehar_l(ip->ehar_l);
1022 ioc3_w_ersr(42); /* XXX should be random */
1023
1024 ioc3_init_rings(dev);
1025
1026 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
1027 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
1028 ioc3_w_emcr(ip->emcr);
1029 ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
1030 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
1031 EISR_TXEXPLICIT | EISR_TXMEMERR);
1032 (void) ioc3_r_eier();
1033}
1034
1035static inline void ioc3_stop(struct ioc3_private *ip)
1036{
1037 struct ioc3 *ioc3 = ip->regs;
1038
1039 ioc3_w_emcr(0); /* Shutup */
1040 ioc3_w_eier(0); /* Disable interrupts */
1041 (void) ioc3_r_eier(); /* Flush */
1042}
1043
1044static int ioc3_open(struct net_device *dev)
1045{
1046 struct ioc3_private *ip = netdev_priv(dev);
1047
1048 if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
1049 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
1050
1051 return -EAGAIN;
1052 }
1053
1054 ip->ehar_h = 0;
1055 ip->ehar_l = 0;
1056 ioc3_init(dev);
1057 ioc3_mii_start(ip);
1058
1059 netif_start_queue(dev);
1060 return 0;
1061}
1062
1063static int ioc3_close(struct net_device *dev)
1064{
1065 struct ioc3_private *ip = netdev_priv(dev);
1066
1067 del_timer_sync(&ip->ioc3_timer);
1068
1069 netif_stop_queue(dev);
1070
1071 ioc3_stop(ip);
1072 free_irq(dev->irq, dev);
1073
1074 ioc3_free_rings(ip);
1075 return 0;
1076}
1077
1078/*
1079 * MENET cards have four IOC3 chips, which are attached to two sets of
1080 * PCI slot resources each: the primary connections are on slots
1081 * 0..3 and the secondaries are on 4..7
1082 *
1083 * All four ethernets are brought out to connectors; six serial ports
1084 * (a pair from each of the first three IOC3s) are brought out to
1085 * MiniDINs; all other subdevices are left swinging in the wind, leave
1086 * them disabled.
1087 */
1088
1089static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
1090{
1091 struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
1092 int ret = 0;
1093
1094 if (dev) {
1095 if (dev->vendor == PCI_VENDOR_ID_SGI &&
1096 dev->device == PCI_DEVICE_ID_SGI_IOC3)
1097 ret = 1;
1098 pci_dev_put(dev);
1099 }
1100
1101 return ret;
1102}
1103
1104static int ioc3_is_menet(struct pci_dev *pdev)
1105{
1106 return pdev->bus->parent == NULL &&
1107 ioc3_adjacent_is_ioc3(pdev, 0) &&
1108 ioc3_adjacent_is_ioc3(pdev, 1) &&
1109 ioc3_adjacent_is_ioc3(pdev, 2);
1110}
1111
1112#ifdef CONFIG_SERIAL_8250
1113/*
1114 * Note about serial ports and consoles:
1115 * For console output, everyone uses the IOC3 UARTA (offset 0x178)
1116 * connected to the master node (look in ip27_setup_console() and
1117 * ip27prom_console_write()).
1118 *
1119 * For serial (/dev/ttyS0 etc), we cannot have hardcoded serial port
1120 * addresses on a partitioned machine. Since we currently use the ioc3
1121 * serial ports, we use dynamic serial port discovery that the serial.c
1122 * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
1123 * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
1124 * than UARTB's, although UARTA on o200s has traditionally been known as
1125 * port 0. So, we just use one serial port from each ioc3 (since the
1126 * serial driver adds addresses to get to higher ports).
1127 *
1128 * The first one to do a register_console becomes the preferred console
1129 * (if there is no kernel command line console= directive). /dev/console
1130 * (ie 5, 1) is then "aliased" into the device number returned by the
1131 * "device" routine referred to in this console structure
1132 * (ip27prom_console_dev).
1133 *
1134 * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
1135 * around ioc3 oddities in this respect.
1136 *
1137 * The IOC3 serials use a 22MHz clock rate with an additional divider which
1138 * can be programmed in the SCR register if the DLAB bit is set.
1139 *
1140 * Register to interrupt zero because we share the interrupt with
1141 * the serial driver which we don't properly support yet.
1142 *
1143 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
1144 * registered.
1145 */
1146static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
1147{
1148#define COSMISC_CONSTANT 6
1149
1150 struct uart_port port = {
1151 .irq = 0,
1152 .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
1153 .iotype = UPIO_MEM,
1154 .regshift = 0,
1155 .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
1156
1157 .membase = (unsigned char __iomem *) uart,
1158 .mapbase = (unsigned long) uart,
1159 };
1160 unsigned char lcr;
1161
1162 lcr = uart->iu_lcr;
1163 uart->iu_lcr = lcr | UART_LCR_DLAB;
1164 uart->iu_scr = COSMISC_CONSTANT,
1165 uart->iu_lcr = lcr;
1166 uart->iu_lcr;
1167 serial8250_register_port(&port);
1168}
1169
1170static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
1171{
1172 /*
1173 * We need to recognize and treat the fourth MENET serial specially, as
1174 * it does not have a SuperIO chip attached to it; attempting
1175 * to access it will result in bus errors. We call something a
1176 * MENET if PCI slots 0, 1, 2 and 3 of a master PCI bus all have an IOC3
1177 * in them. This is paranoid, but we want to avoid blowing up on a
1178 * shoehorned PCI box that happens to have 4 IOC3 cards in it, so it's
1179 * arguably not paranoid enough ...
1180 */
1181 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
1182 return;
1183
1184 /*
1185 * Switch IOC3 to PIO mode. It probably already was but let's be
1186 * paranoid
1187 */
1188 ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
1189 ioc3->gpcr_s;
1190 ioc3->gppr_6 = 0;
1191 ioc3->gppr_6;
1192 ioc3->gppr_7 = 0;
1193 ioc3->gppr_7;
1194 ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
1195 ioc3->sscr_a;
1196 ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
1197 ioc3->sscr_b;
1198 /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
1199 ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
1200 SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
1201 SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
1202 SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
1203 ioc3->sio_iec |= SIO_IR_SA_INT;
1204 ioc3->sscr_a = 0;
1205 ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
1206 SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
1207 SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
1208 SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
1209 ioc3->sio_iec |= SIO_IR_SB_INT;
1210 ioc3->sscr_b = 0;
1211
1212 ioc3_8250_register(&ioc3->sregs.uarta);
1213 ioc3_8250_register(&ioc3->sregs.uartb);
1214}
1215#endif
1216
1217static const struct net_device_ops ioc3_netdev_ops = {
1218 .ndo_open = ioc3_open,
1219 .ndo_stop = ioc3_close,
1220 .ndo_start_xmit = ioc3_start_xmit,
1221 .ndo_tx_timeout = ioc3_timeout,
1222 .ndo_get_stats = ioc3_get_stats,
1223 .ndo_set_multicast_list = ioc3_set_multicast_list,
1224 .ndo_do_ioctl = ioc3_ioctl,
1225 .ndo_validate_addr = eth_validate_addr,
1226 .ndo_set_mac_address = ioc3_set_mac_address,
1227 .ndo_change_mtu = eth_change_mtu,
1228};
1229
1230static int __devinit ioc3_probe(struct pci_dev *pdev,
1231 const struct pci_device_id *ent)
1232{
1233 unsigned int sw_physid1, sw_physid2;
1234 struct net_device *dev = NULL;
1235 struct ioc3_private *ip;
1236 struct ioc3 *ioc3;
1237 unsigned long ioc3_base, ioc3_size;
1238 u32 vendor, model, rev;
1239 int err, pci_using_dac;
1240
1241 /* Configure DMA attributes. */
1242 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1243 if (!err) {
1244 pci_using_dac = 1;
1245 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1246 if (err < 0) {
1247 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
1248 "for consistent allocations\n", pci_name(pdev));
1249 goto out;
1250 }
1251 } else {
1252 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1253 if (err) {
1254 printk(KERN_ERR "%s: No usable DMA configuration, "
1255 "aborting.\n", pci_name(pdev));
1256 goto out;
1257 }
1258 pci_using_dac = 0;
1259 }
1260
1261 if (pci_enable_device(pdev))
1262 return -ENODEV;
1263
1264 dev = alloc_etherdev(sizeof(struct ioc3_private));
1265 if (!dev) {
1266 err = -ENOMEM;
1267 goto out_disable;
1268 }
1269
1270 if (pci_using_dac)
1271 dev->features |= NETIF_F_HIGHDMA;
1272
1273 err = pci_request_regions(pdev, "ioc3");
1274 if (err)
1275 goto out_free;
1276
1277 SET_NETDEV_DEV(dev, &pdev->dev);
1278
1279 ip = netdev_priv(dev);
1280
1281 dev->irq = pdev->irq;
1282
1283 ioc3_base = pci_resource_start(pdev, 0);
1284 ioc3_size = pci_resource_len(pdev, 0);
1285 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
1286 if (!ioc3) {
1287 printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
1288 pci_name(pdev));
1289 err = -ENOMEM;
1290 goto out_res;
1291 }
1292 ip->regs = ioc3;
1293
1294#ifdef CONFIG_SERIAL_8250
1295 ioc3_serial_probe(pdev, ioc3);
1296#endif
1297
1298 spin_lock_init(&ip->ioc3_lock);
1299 init_timer(&ip->ioc3_timer);
1300
1301 ioc3_stop(ip);
1302 ioc3_init(dev);
1303
1304 ip->pdev = pdev;
1305
1306 ip->mii.phy_id_mask = 0x1f;
1307 ip->mii.reg_num_mask = 0x1f;
1308 ip->mii.dev = dev;
1309 ip->mii.mdio_read = ioc3_mdio_read;
1310 ip->mii.mdio_write = ioc3_mdio_write;
1311
1312 ioc3_mii_init(ip);
1313
1314 if (ip->mii.phy_id == -1) {
1315 printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1316 pci_name(pdev));
1317 err = -ENODEV;
1318 goto out_stop;
1319 }
1320
1321 ioc3_mii_start(ip);
1322 ioc3_ssram_disc(ip);
1323 ioc3_get_eaddr(ip);
1324
1325 /* The IOC3-specific entries in the device structure. */
1326 dev->watchdog_timeo = 5 * HZ;
1327 dev->netdev_ops = &ioc3_netdev_ops;
1328 dev->ethtool_ops = &ioc3_ethtool_ops;
1329 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1330 dev->features = NETIF_F_IP_CSUM;
1331
1332 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
1333 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
1334
1335 err = register_netdev(dev);
1336 if (err)
1337 goto out_stop;
1338
1339 mii_check_media(&ip->mii, 1, 1);
1340 ioc3_setup_duplex(ip);
1341
1342 vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
1343 model = (sw_physid2 >> 4) & 0x3f;
1344 rev = sw_physid2 & 0xf;
1345 printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
1346 "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
1347 printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
1348 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
1349
1350 return 0;
1351
1352out_stop:
1353 ioc3_stop(ip);
1354 del_timer_sync(&ip->ioc3_timer);
1355 ioc3_free_rings(ip);
1356out_res:
1357 pci_release_regions(pdev);
1358out_free:
1359 free_netdev(dev);
1360out_disable:
1361 /*
1362 * We should call pci_disable_device(pdev); here if the IOC3 wasn't
1363 * such a weird device ...
1364 */
1365out:
1366 return err;
1367}
1368
1369static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1370{
1371 struct net_device *dev = pci_get_drvdata(pdev);
1372 struct ioc3_private *ip = netdev_priv(dev);
1373 struct ioc3 *ioc3 = ip->regs;
1374
1375 unregister_netdev(dev);
1376 del_timer_sync(&ip->ioc3_timer);
1377
1378 iounmap(ioc3);
1379 pci_release_regions(pdev);
1380 free_netdev(dev);
1381 /*
1382 * We should call pci_disable_device(pdev); here if the IOC3 wasn't
1383 * such a weird device ...
1384 */
1385}
1386
1387static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
1388 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1389 { 0 }
1390};
1391MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
1392
1393static struct pci_driver ioc3_driver = {
1394 .name = "ioc3-eth",
1395 .id_table = ioc3_pci_tbl,
1396 .probe = ioc3_probe,
1397 .remove = __devexit_p(ioc3_remove_one),
1398};
1399
1400static int __init ioc3_init_module(void)
1401{
1402 return pci_register_driver(&ioc3_driver);
1403}
1404
1405static void __exit ioc3_cleanup_module(void)
1406{
1407 pci_unregister_driver(&ioc3_driver);
1408}
1409
1410static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1411{
1412 unsigned long data;
1413 struct ioc3_private *ip = netdev_priv(dev);
1414 struct ioc3 *ioc3 = ip->regs;
1415 unsigned int len;
1416 struct ioc3_etxd *desc;
1417 uint32_t w0 = 0;
1418 int produce;
1419
1420 /*
1421 * IOC3 has fairly simple-minded checksumming hardware which simply
1422 * adds up the 1's complement checksum for the entire packet and
1423 * inserts it into the transmit packet at an offset which can be
1424 * specified in the descriptor. This means we have to compensate
1425 * manually for the MAC header, which should not be summed, and for
1426 * the TCP/UDP pseudo headers.
1427 */
1428 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1429 const struct iphdr *ih = ip_hdr(skb);
1430 const int proto = ih->protocol;
1431 unsigned int csoff;
1432 uint32_t csum, ehsum;
1433 uint16_t *eh;
1434
1435 /* The MAC header. skb->mac seems the logical approach
1436 to finding the MAC header - except it's a NULL pointer ... */
1437 eh = (uint16_t *) skb->data;
1438
1439 /* Sum up dest addr, src addr and protocol */
1440 ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
1441
1442 /* Fold ehsum. can't use csum_fold which negates also ... */
1443 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
1444 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
1445
1446 /* Skip the IP header; its sum is always zero and was
1447 already filled in by ip_output.c */
1448 csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
1449 ih->tot_len - (ih->ihl << 2),
1450 proto, 0xffff ^ ehsum);
1451
1452 csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
1453 csum = (csum & 0xffff) + (csum >> 16);
1454
1455 csoff = ETH_HLEN + (ih->ihl << 2);
1456 if (proto == IPPROTO_UDP) {
1457 csoff += offsetof(struct udphdr, check);
1458 udp_hdr(skb)->check = csum;
1459 }
1460 if (proto == IPPROTO_TCP) {
1461 csoff += offsetof(struct tcphdr, check);
1462 tcp_hdr(skb)->check = csum;
1463 }
1464
1465 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
1466 }
1467
1468 spin_lock_irq(&ip->ioc3_lock);
1469
1470 data = (unsigned long) skb->data;
1471 len = skb->len;
1472
1473 produce = ip->tx_pi;
1474 desc = &ip->txr[produce];
1475
1476 if (len <= 104) {
1477 /* Short packet, let's copy it directly into the ring. */
1478 skb_copy_from_linear_data(skb, desc->data, skb->len);
1479 if (len < ETH_ZLEN) {
1480 /* Very short packet, pad with zeros at the end. */
1481 memset(desc->data + len, 0, ETH_ZLEN - len);
1482 len = ETH_ZLEN;
1483 }
1484 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
1485 desc->bufcnt = cpu_to_be32(len);
1486 } else if ((data ^ (data + len - 1)) & 0x4000) {
1487 unsigned long b2 = (data | 0x3fffUL) + 1UL;
1488 unsigned long s1 = b2 - data;
1489 unsigned long s2 = data + len - b2;
1490
1491 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
1492 ETXD_B1V | ETXD_B2V | w0);
1493 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
1494 (s2 << ETXD_B2CNT_SHIFT));
1495 desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
1496 desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
1497 } else {
1498 /* Normal sized packet that doesn't cross a page boundary. */
1499 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
1500 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
1501 desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
1502 }
1503
1504 BARRIER();
1505
1506 ip->tx_skbs[produce] = skb; /* Remember skb */
1507 produce = (produce + 1) & 127;
1508 ip->tx_pi = produce;
1509 ioc3_w_etpir(produce << 7); /* Fire ... */
1510
1511 ip->txqlen++;
1512
1513 if (ip->txqlen >= 127)
1514 netif_stop_queue(dev);
1515
1516 spin_unlock_irq(&ip->ioc3_lock);
1517
1518 return NETDEV_TX_OK;
1519}
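A worked example of the page-crossing test above (illustrative addresses): (data ^ (data + len - 1)) & 0x4000 is non-zero exactly when bit 14 differs between the first and last byte, i.e. when the buffer crosses a 16KB boundary; Ethernet frames are always shorter than 16KB, so one crossing is the worst case.

/* Illustrative only:
 *   data = 0x3f00, len = 0x200: last byte 0x40ff, 0x3f00 ^ 0x40ff = 0x7fff,
 *       bit 14 set -> the descriptor is split into two buffers (B1V | B2V).
 *   data = 0x0100, len = 0x200: last byte 0x02ff, xor = 0x03ff,
 *       bit 14 clear -> a single buffer suffices (B1V).
 */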
1520
1521static void ioc3_timeout(struct net_device *dev)
1522{
1523 struct ioc3_private *ip = netdev_priv(dev);
1524
1525 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
1526
1527 spin_lock_irq(&ip->ioc3_lock);
1528
1529 ioc3_stop(ip);
1530 ioc3_init(dev);
1531 ioc3_mii_init(ip);
1532 ioc3_mii_start(ip);
1533
1534 spin_unlock_irq(&ip->ioc3_lock);
1535
1536 netif_wake_queue(dev);
1537}
1538
1539/*
1540 * Given a multicast ethernet address, this routine calculates the
1541 * address's bit index in the logical address filter mask
1542 */
1543
1544static inline unsigned int ioc3_hash(const unsigned char *addr)
1545{
1546 unsigned int temp = 0;
1547 u32 crc;
1548 int bits;
1549
1550 crc = ether_crc_le(ETH_ALEN, addr);
1551
1552 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
1553 for (bits = 6; --bits >= 0; ) {
1554 temp <<= 1;
1555 temp |= (crc & 0x1);
1556 crc >>= 1;
1557 }
1558
1559 return temp;
1560}
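A worked example (illustrative value, not from the source): the low six bits of the little-endian CRC are bit-reversed into the filter index.

/* Illustrative only:
 *   crc & 0x3f = 0b100101 (0x25)
 *   bit-reversed -> 0b101001 = 41
 * so ioc3_set_multicast_list() below sets bit 41 of the 64-bit
 * EHAR filter (ehar |= 1UL << 41) for this address.
 */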
1561
1562static void ioc3_get_drvinfo (struct net_device *dev,
1563 struct ethtool_drvinfo *info)
1564{
1565 struct ioc3_private *ip = netdev_priv(dev);
1566
1567 strcpy (info->driver, IOC3_NAME);
1568 strcpy (info->version, IOC3_VERSION);
1569 strcpy (info->bus_info, pci_name(ip->pdev));
1570}
1571
1572static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1573{
1574 struct ioc3_private *ip = netdev_priv(dev);
1575 int rc;
1576
1577 spin_lock_irq(&ip->ioc3_lock);
1578 rc = mii_ethtool_gset(&ip->mii, cmd);
1579 spin_unlock_irq(&ip->ioc3_lock);
1580
1581 return rc;
1582}
1583
1584static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1585{
1586 struct ioc3_private *ip = netdev_priv(dev);
1587 int rc;
1588
1589 spin_lock_irq(&ip->ioc3_lock);
1590 rc = mii_ethtool_sset(&ip->mii, cmd);
1591 spin_unlock_irq(&ip->ioc3_lock);
1592
1593 return rc;
1594}
1595
1596static int ioc3_nway_reset(struct net_device *dev)
1597{
1598 struct ioc3_private *ip = netdev_priv(dev);
1599 int rc;
1600
1601 spin_lock_irq(&ip->ioc3_lock);
1602 rc = mii_nway_restart(&ip->mii);
1603 spin_unlock_irq(&ip->ioc3_lock);
1604
1605 return rc;
1606}
1607
1608static u32 ioc3_get_link(struct net_device *dev)
1609{
1610 struct ioc3_private *ip = netdev_priv(dev);
1611 int rc;
1612
1613 spin_lock_irq(&ip->ioc3_lock);
1614 rc = mii_link_ok(&ip->mii);
1615 spin_unlock_irq(&ip->ioc3_lock);
1616
1617 return rc;
1618}
1619
1620static const struct ethtool_ops ioc3_ethtool_ops = {
1621 .get_drvinfo = ioc3_get_drvinfo,
1622 .get_settings = ioc3_get_settings,
1623 .set_settings = ioc3_set_settings,
1624 .nway_reset = ioc3_nway_reset,
1625 .get_link = ioc3_get_link,
1626};
1627
1628static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1629{
1630 struct ioc3_private *ip = netdev_priv(dev);
1631 int rc;
1632
1633 spin_lock_irq(&ip->ioc3_lock);
1634 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
1635 spin_unlock_irq(&ip->ioc3_lock);
1636
1637 return rc;
1638}
1639
1640static void ioc3_set_multicast_list(struct net_device *dev)
1641{
1642 struct netdev_hw_addr *ha;
1643 struct ioc3_private *ip = netdev_priv(dev);
1644 struct ioc3 *ioc3 = ip->regs;
1645 u64 ehar = 0;
1646
1647 netif_stop_queue(dev); /* Lock out others. */
1648
1649 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1650 ip->emcr |= EMCR_PROMISC;
1651 ioc3_w_emcr(ip->emcr);
1652 (void) ioc3_r_emcr();
1653 } else {
1654 ip->emcr &= ~EMCR_PROMISC;
1655 ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
1656 (void) ioc3_r_emcr();
1657
1658 if ((dev->flags & IFF_ALLMULTI) ||
1659 (netdev_mc_count(dev) > 64)) {
1660 /* Too many for hashing to make sense or we want all
1661 multicast packets anyway, so skip computing all the
1662 hashes and just accept all packets. */
1663 ip->ehar_h = 0xffffffff;
1664 ip->ehar_l = 0xffffffff;
1665 } else {
1666 netdev_for_each_mc_addr(ha, dev) {
1667 ehar |= (1UL << ioc3_hash(ha->addr));
1668 }
1669 ip->ehar_h = ehar >> 32;
1670 ip->ehar_l = ehar & 0xffffffff;
1671 }
1672 ioc3_w_ehar_h(ip->ehar_h);
1673 ioc3_w_ehar_l(ip->ehar_l);
1674 }
1675
1676 netif_wake_queue(dev); /* Let us get going again. */
1677}
1678
1679MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
1680MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1681MODULE_LICENSE("GPL");
1682
1683module_init(ioc3_init_module);
1684module_exit(ioc3_cleanup_module);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
new file mode 100644
index 000000000000..60135aa55802
--- /dev/null
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -0,0 +1,855 @@
1/*
2 * meth.c -- O2 Builtin 10/100 Ethernet driver
3 *
4 * Copyright (C) 2001-2003 Ilya Volynets
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/delay.h>
12#include <linux/dma-mapping.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/interrupt.h>
21
22#include <linux/in.h>
23#include <linux/in6.h>
24#include <linux/device.h> /* struct device, et al */
25#include <linux/netdevice.h> /* struct device, and other headers */
26#include <linux/etherdevice.h> /* eth_type_trans */
27#include <linux/ip.h> /* struct iphdr */
28#include <linux/tcp.h> /* struct tcphdr */
29#include <linux/skbuff.h>
30#include <linux/mii.h> /* MII definitions */
31
32#include <asm/ip32/mace.h>
33#include <asm/ip32/ip32_ints.h>
34
35#include <asm/io.h>
36
37#include "meth.h"
38
39#ifndef MFE_DEBUG
40#define MFE_DEBUG 0
41#endif
42
43#if MFE_DEBUG>=1
44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
45#define MFE_RX_DEBUG 2
46#else
47#define DPRINTK(str,args...)
48#define MFE_RX_DEBUG 0
49#endif
50
51
52static const char *meth_str="SGI O2 Fast Ethernet";
53
54/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
55#define TX_TIMEOUT (400*HZ/1000)
56
57static int timeout = TX_TIMEOUT;
58module_param(timeout, int, 0);
59
60/*
61 * This structure is private to each device. It is used to pass
62 * packets in and out, so there is room here for the packet rings
63 */
64struct meth_private {
65 /* in-memory copy of MAC Control register */
66 unsigned long mac_ctrl;
67 /* in-memory copy of DMA Control register */
68 unsigned long dma_ctrl;
69 /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
70 unsigned long phy_addr;
71 tx_packet *tx_ring;
72 dma_addr_t tx_ring_dma;
73 struct sk_buff *tx_skbs[TX_RING_ENTRIES];
74 dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
75 unsigned long tx_read, tx_write, tx_count;
76
77 rx_packet *rx_ring[RX_RING_ENTRIES];
78 dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
79 struct sk_buff *rx_skbs[RX_RING_ENTRIES];
80 unsigned long rx_write;
81
82 spinlock_t meth_lock;
83};
84
85static void meth_tx_timeout(struct net_device *dev);
86static irqreturn_t meth_interrupt(int irq, void *dev_id);
87
88/* global, initialized in ip32-setup.c */
89char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};
90
91static inline void load_eaddr(struct net_device *dev)
92{
93 int i;
94 u64 macaddr;
95
96 DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
97 macaddr = 0;
98 for (i = 0; i < 6; i++)
99 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
100
101 mace->eth.mac_addr = macaddr;
102}
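A worked example (illustrative address): the loop packs the octets big-end first into the 64-bit register written to mace->eth.mac_addr.

/* Illustrative only: 08:00:69:12:34:56 -> macaddr = 0x0000080069123456
 * (dev_addr[0] lands in bits 47:40, dev_addr[5] in bits 7:0). */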
103
104/*
105 * Waits for BUSY status of mdio bus to clear
106 */
107#define WAIT_FOR_PHY(___rval) \
108 while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \
109 udelay(25); \
110 }
111/* Read a PHY register; returns the value read. */
112static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
113{
114 unsigned long rval;
115 WAIT_FOR_PHY(rval);
116 mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
117 udelay(25);
118 mace->eth.phy_trans_go = 1;
119 udelay(25);
120 WAIT_FOR_PHY(rval);
121 return rval & MDIO_DATA_MASK;
122}
123
124static int mdio_probe(struct meth_private *priv)
125{
126 int i;
127 unsigned long p2, p3, flags;
128 /* check if phy is detected already */
129 if (priv->phy_addr >= 0 && priv->phy_addr < 32)
130 return 0;
131 spin_lock_irqsave(&priv->meth_lock, flags);
132 for (i = 0; i < 32; ++i) {
133 priv->phy_addr = i;
134 p2 = mdio_read(priv, 2);
135 p3 = mdio_read(priv, 3);
136#if MFE_DEBUG>=2
137 switch ((p2 << 12) | (p3 >> 4)) {
138 case PHY_QS6612X:
139 DPRINTK("PHY is QS6612X\n");
140 break;
141 case PHY_ICS1889:
142 DPRINTK("PHY is ICS1889\n");
143 break;
144 case PHY_ICS1890:
145 DPRINTK("PHY is ICS1890\n");
146 break;
147 case PHY_DP83840:
148 DPRINTK("PHY is DP83840\n");
149 break;
150 }
151#endif
152 if (p2 != 0xffff && p2 != 0x0000) {
153 DPRINTK("PHY code: %x\n", (p2 << 12) | (p3 >> 4));
154 break;
155 }
156 }
157 spin_unlock_irqrestore(&priv->meth_lock, flags);
158 if (priv->phy_addr < 32) {
159 return 0;
160 }
161 DPRINTK("Oopsie! PHY is not known!\n");
162 priv->phy_addr = -1;
163 return -ENODEV;
164}
165
166static void meth_check_link(struct net_device *dev)
167{
168 struct meth_private *priv = netdev_priv(dev);
169 unsigned long mii_advertising = mdio_read(priv, 4);
170 unsigned long mii_partner = mdio_read(priv, 5);
171 unsigned long negotiated = mii_advertising & mii_partner;
172 unsigned long duplex, speed;
173
174 if (mii_partner == 0xffff)
175 return;
176
177 speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
178 duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
179 METH_PHY_FDX : 0;
180
181 if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
182 DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
183 if (duplex)
184 priv->mac_ctrl |= METH_PHY_FDX;
185 else
186 priv->mac_ctrl &= ~METH_PHY_FDX;
187 mace->eth.mac_ctrl = priv->mac_ctrl;
188 }
189
190 if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
191 DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
192 if (speed)
193 priv->mac_ctrl |= METH_100MBIT;
194 else
195 priv->mac_ctrl &= ~METH_100MBIT;
196 mace->eth.mac_ctrl = priv->mac_ctrl;
197 }
198}
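
The magic masks above are the standard MII autonegotiation advertisement bits: 0x0020 = 10BASE-T, 0x0040 = 10BASE-T full duplex, 0x0080 = 100BASE-TX, 0x0100 = 100BASE-TX full duplex, 0x0200 = 100BASE-T4. A small sketch of the same decode on a hypothetical negotiation result:

    #include <stdio.h>

    int main(void)
    {
            unsigned long negotiated = 0x0040; /* both ends offer only 10BASE-T FD */
            int is_100 = (negotiated & 0x0380) != 0;  /* any 100Mbit mode? -> 0 */
            int is_fdx = (negotiated & 0x0100) ||
                         (negotiated & 0x01C0) == 0x0040; /* 100FD, or 10FD alone -> 1 */

            /* prints "10Mbit, full-duplex" */
            printf("%dMbit, %s-duplex\n", is_100 ? 100 : 10, is_fdx ? "full" : "half");
            return 0;
    }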
199
200
201static int meth_init_tx_ring(struct meth_private *priv)
202{
203 /* Init TX ring */
204 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
205 &priv->tx_ring_dma, GFP_ATOMIC);
206 if (!priv->tx_ring)
207 return -ENOMEM;
208 memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
209 priv->tx_count = priv->tx_read = priv->tx_write = 0;
210 mace->eth.tx_ring_base = priv->tx_ring_dma;
211 /* Now init skb save area */
212 memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
213 memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
214 return 0;
215}
216
217static int meth_init_rx_ring(struct meth_private *priv)
218{
219 int i;
220
221 for (i = 0; i < RX_RING_ENTRIES; i++) {
222 priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
223 /* 8byte status vector + 3quad padding + 2byte padding,
224 * to put data on 64bit aligned boundary */
225 skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
226 priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
227 /* I'll need to re-sync it after each RX */
228 priv->rx_ring_dmas[i] =
229 dma_map_single(NULL, priv->rx_ring[i],
230 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
231 mace->eth.rx_fifo = priv->rx_ring_dmas[i];
232 }
233 priv->rx_write = 0;
234 return 0;
235}
236static void meth_free_tx_ring(struct meth_private *priv)
237{
238 int i;
239
240 /* Remove any pending skb */
241 for (i = 0; i < TX_RING_ENTRIES; i++) {
242 if (priv->tx_skbs[i])
243 dev_kfree_skb(priv->tx_skbs[i]);
244 priv->tx_skbs[i] = NULL;
245 }
246 dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
247 priv->tx_ring_dma);
248}
249
250/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
251static void meth_free_rx_ring(struct meth_private *priv)
252{
253 int i;
254
255 for (i = 0; i < RX_RING_ENTRIES; i++) {
256 dma_unmap_single(NULL, priv->rx_ring_dmas[i],
257 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
258 priv->rx_ring[i] = NULL;
259 priv->rx_ring_dmas[i] = 0;
260 kfree_skb(priv->rx_skbs[i]);
261 }
262}
263
264int meth_reset(struct net_device *dev)
265{
266 struct meth_private *priv = netdev_priv(dev);
267
268 /* Reset card */
269 mace->eth.mac_ctrl = SGI_MAC_RESET;
270 udelay(1);
271 mace->eth.mac_ctrl = 0;
272 udelay(25);
273
274 /* Load ethernet address */
275 load_eaddr(dev);
276 /* Should load some "errata", but later */
277
278 /* Check for device */
279 if (mdio_probe(priv) < 0) {
280 DPRINTK("Unable to find PHY\n");
281 return -ENODEV;
282 }
283
284 /* Initial mode: 10 | Half-duplex | Accept normal packets */
285 priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
286 if (dev->flags & IFF_PROMISC)
287 priv->mac_ctrl |= METH_PROMISC;
288 mace->eth.mac_ctrl = priv->mac_ctrl;
289
290 /* Autonegotiate speed and duplex mode */
291 meth_check_link(dev);
292
293 /* Now set dma control, but don't enable DMA, yet */
294 priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
295 (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
296 mace->eth.dma_ctrl = priv->dma_ctrl;
297
298 return 0;
299}
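
Worked out with the constants from meth.h (METH_RX_OFFSET_SHIFT = 12, METH_RX_DEPTH_SHIFT = 4, RX_RING_ENTRIES = 16), the initial DMA control word is:

    /* (4 << 12) | (16 << 4) = 0x4000 | 0x100 = 0x4100:
     * an RX data offset of 4 and a 16-entry RX fifo depth.
     * The METH_DMA_*_EN bits are OR'd in later by meth_open(). */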
300
301/*============End Helper Routines=====================*/
302
303/*
304 * Open and close
305 */
306static int meth_open(struct net_device *dev)
307{
308 struct meth_private *priv = netdev_priv(dev);
309 int ret;
310
311 priv->phy_addr = -1; /* No PHY is known yet... */
312
313 /* Initialize the hardware */
314 ret = meth_reset(dev);
315 if (ret < 0)
316 return ret;
317
318 /* Allocate the ring buffers */
319 ret = meth_init_tx_ring(priv);
320 if (ret < 0)
321 return ret;
322 ret = meth_init_rx_ring(priv);
323 if (ret < 0)
324 goto out_free_tx_ring;
325
326 ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
327 if (ret) {
328 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
329 goto out_free_rx_ring;
330 }
331
332 /* Start DMA */
333 priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
334 METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
335 mace->eth.dma_ctrl = priv->dma_ctrl;
336
337 DPRINTK("About to start queue\n");
338 netif_start_queue(dev);
339
340 return 0;
341
342out_free_rx_ring:
343 meth_free_rx_ring(priv);
344out_free_tx_ring:
345 meth_free_tx_ring(priv);
346
347 return ret;
348}
349
350static int meth_release(struct net_device *dev)
351{
352 struct meth_private *priv = netdev_priv(dev);
353
354 DPRINTK("Stopping queue\n");
355 netif_stop_queue(dev); /* can't transmit any more */
356 /* shut down DMA */
357 priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
358 METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
359 mace->eth.dma_ctrl = priv->dma_ctrl;
360 free_irq(dev->irq, dev);
361 meth_free_tx_ring(priv);
362 meth_free_rx_ring(priv);
363
364 return 0;
365}
366
367/*
368 * Receive a packet: retrieve, encapsulate and pass over to upper levels
369 */
370static void meth_rx(struct net_device* dev, unsigned long int_status)
371{
372 struct sk_buff *skb;
373 unsigned long status, flags;
374 struct meth_private *priv = netdev_priv(dev);
375 unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
376
377 spin_lock_irqsave(&priv->meth_lock, flags);
378 priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
379 mace->eth.dma_ctrl = priv->dma_ctrl;
380 spin_unlock_irqrestore(&priv->meth_lock, flags);
381
382 if (int_status & METH_INT_RX_UNDERFLOW) {
383 fifo_rptr = (fifo_rptr - 1) & 0x0f;
384 }
385 while (priv->rx_write != fifo_rptr) {
386 dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
387 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
388 status = priv->rx_ring[priv->rx_write]->status.raw;
389#if MFE_DEBUG
390 if (!(status & METH_RX_ST_VALID)) {
391 DPRINTK("Not received? status=%016lx\n",status);
392 }
393#endif
394 if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
395 int len = (status & 0xffff) - 4; /* omit CRC */
396 /* length sanity check */
397 if (len < 60 || len > 1518) {
398 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2Lx.\n",
399 dev->name, len,
400 priv->rx_ring[priv->rx_write]->status.raw);
401 dev->stats.rx_errors++;
402 dev->stats.rx_length_errors++;
403 skb = priv->rx_skbs[priv->rx_write];
404 } else {
405 skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
406 if (!skb) {
407 /* Ouch! No memory! Drop packet on the floor */
408 DPRINTK("No mem: dropping packet\n");
409 dev->stats.rx_dropped++;
410 skb = priv->rx_skbs[priv->rx_write];
411 } else {
412 struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
413 /* 8byte status vector + 3quad padding + 2byte padding,
414 * to put data on 64bit aligned boundary */
415 skb_reserve(skb, METH_RX_HEAD);
416 /* Write metadata, and then pass to the receive level */
417 skb_put(skb_c, len);
418 priv->rx_skbs[priv->rx_write] = skb;
419 skb_c->protocol = eth_type_trans(skb_c, dev);
420 dev->stats.rx_packets++;
421 dev->stats.rx_bytes += len;
422 netif_rx(skb_c);
423 }
424 }
425 } else {
426 dev->stats.rx_errors++;
427 skb = priv->rx_skbs[priv->rx_write];
428#if MFE_DEBUG>0
429 printk(KERN_WARNING "meth: RX error: status=0x%016lx\n", status);
430 if (status & METH_RX_ST_RCV_CODE_VIOLATION)
431 printk(KERN_WARNING "Receive Code Violation\n");
432 if (status & METH_RX_ST_CRC_ERR)
433 printk(KERN_WARNING "CRC error\n");
434 if (status & METH_RX_ST_INV_PREAMBLE_CTX)
435 printk(KERN_WARNING "Invalid Preamble Context\n");
436 if (status & METH_RX_ST_LONG_EVT_SEEN)
437 printk(KERN_WARNING "Long Event Seen...\n");
438 if (status & METH_RX_ST_BAD_PACKET)
439 printk(KERN_WARNING "Bad Packet\n");
440 if (status & METH_RX_ST_CARRIER_EVT_SEEN)
441 printk(KERN_WARNING "Carrier Event Seen\n");
442#endif
443 }
444 priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
445 priv->rx_ring[priv->rx_write]->status.raw = 0;
446 priv->rx_ring_dmas[priv->rx_write] =
447 dma_map_single(NULL, priv->rx_ring[priv->rx_write],
448 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
449 mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
450 ADVANCE_RX_PTR(priv->rx_write);
451 }
452 spin_lock_irqsave(&priv->meth_lock, flags);
453 /* In case there was underflow, and Rx DMA was disabled */
454 priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
455 mace->eth.dma_ctrl = priv->dma_ctrl;
456 mace->eth.int_stat = METH_INT_RX_THRESHOLD;
457 spin_unlock_irqrestore(&priv->meth_lock, flags);
458}
459
460static int meth_tx_full(struct net_device *dev)
461{
462 struct meth_private *priv = netdev_priv(dev);
463
464 return priv->tx_count >= TX_RING_ENTRIES - 1;
465}
466
467static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
468{
469 struct meth_private *priv = netdev_priv(dev);
470 unsigned long status, flags;
471 struct sk_buff *skb;
472 unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
473
474 spin_lock_irqsave(&priv->meth_lock, flags);
475
476 /* Stop DMA notification */
477 priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
478 mace->eth.dma_ctrl = priv->dma_ctrl;
479
480 while (priv->tx_read != rptr) {
481 skb = priv->tx_skbs[priv->tx_read];
482 status = priv->tx_ring[priv->tx_read].header.raw;
483#if MFE_DEBUG>=1
484 if (priv->tx_read == priv->tx_write)
485 DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr);
486#endif
487 if (status & METH_TX_ST_DONE) {
488 if (status & METH_TX_ST_SUCCESS){
489 dev->stats.tx_packets++;
490 dev->stats.tx_bytes += skb->len;
491 } else {
492 dev->stats.tx_errors++;
493#if MFE_DEBUG>=1
494 DPRINTK("TX error: status=%016lx <",status);
495 if(status & METH_TX_ST_SUCCESS)
496 printk(" SUCCESS");
497 if(status & METH_TX_ST_TOOLONG)
498 printk(" TOOLONG");
499 if(status & METH_TX_ST_UNDERRUN)
500 printk(" UNDERRUN");
501 if(status & METH_TX_ST_EXCCOLL)
502 printk(" EXCCOLL");
503 if(status & METH_TX_ST_DEFER)
504 printk(" DEFER");
505 if(status & METH_TX_ST_LATECOLL)
506 printk(" LATECOLL");
507 printk(" >\n");
508#endif
509 }
510 } else {
511 DPRINTK("RPTR points us here, but packet not done?\n");
512 break;
513 }
514 dev_kfree_skb_irq(skb);
515 priv->tx_skbs[priv->tx_read] = NULL;
516 priv->tx_ring[priv->tx_read].header.raw = 0;
517 priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
518 priv->tx_count--;
519 }
520
521 /* wake up queue if it was stopped */
522 if (netif_queue_stopped(dev) && !meth_tx_full(dev)) {
523 netif_wake_queue(dev);
524 }
525
526 mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
527 spin_unlock_irqrestore(&priv->meth_lock, flags);
528}
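
The read pointer extracted above is the hardware's position in the TX ring, taken from bits 16..23 of the interrupt status (TX_INFO_RPTR is 0x00FF0000 in meth.h). A worked example on a hypothetical status word:

    /* int_status = 0x00050002 (hypothetical):
     *   rptr = (0x00050002 & 0x00FF0000) >> 16 = 5
     * so slots tx_read .. 4 have been consumed and can be freed */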
529
530static void meth_error(struct net_device* dev, unsigned status)
531{
532 struct meth_private *priv = netdev_priv(dev);
533 unsigned long flags;
534
535 printk(KERN_WARNING "meth: error status: 0x%08x\n", status);
536 /* check for errors too... */
537 if (status & (METH_INT_TX_LINK_FAIL))
538 printk(KERN_WARNING "meth: link failure\n");
539 /* Should I do full reset in this case? */
540 if (status & (METH_INT_MEM_ERROR))
541 printk(KERN_WARNING "meth: memory error\n");
542 if (status & (METH_INT_TX_ABORT))
543 printk(KERN_WARNING "meth: aborted\n");
544 if (status & (METH_INT_RX_OVERFLOW))
545 printk(KERN_WARNING "meth: Rx overflow\n");
546 if (status & (METH_INT_RX_UNDERFLOW)) {
547 printk(KERN_WARNING "meth: Rx underflow\n");
548 spin_lock_irqsave(&priv->meth_lock, flags);
549 mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
550 /* more underflow interrupts will be delivered,
551 * effectively throwing us into an infinite loop.
552 * Thus I stop processing Rx in this case. */
553 priv->dma_ctrl &= ~METH_DMA_RX_EN;
554 mace->eth.dma_ctrl = priv->dma_ctrl;
555 DPRINTK("Disabled meth Rx DMA temporarily\n");
556 spin_unlock_irqrestore(&priv->meth_lock, flags);
557 }
558 mace->eth.int_stat = METH_INT_ERROR;
559}
560
561/*
562 * The typical interrupt entry point
563 */
564static irqreturn_t meth_interrupt(int irq, void *dev_id)
565{
566 struct net_device *dev = (struct net_device *)dev_id;
567 struct meth_private *priv = netdev_priv(dev);
568 unsigned long status;
569
570 status = mace->eth.int_stat;
571 while (status & 0xff) {
572 /* First handle errors - if we get Rx underflow,
573 * Rx DMA will be disabled, and Rx handler will reenable
574 * it. I don't think it's possible to get Rx underflow
575 * without getting an Rx interrupt. */
576 if (status & METH_INT_ERROR) {
577 meth_error(dev, status);
578 }
579 if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
580 /* a transmission is over: free the skb */
581 meth_tx_cleanup(dev, status);
582 }
583 if (status & METH_INT_RX_THRESHOLD) {
584 if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
585 break;
586 /* send it to meth_rx for handling */
587 meth_rx(dev, status);
588 }
589 status = mace->eth.int_stat;
590 }
591
592 return IRQ_HANDLED;
593}
594
595/*
596 * Transmit packets that fit into the TX descriptor (<= 120 bytes)
597 */
598static void meth_tx_short_prepare(struct meth_private *priv,
599 struct sk_buff *skb)
600{
601 tx_packet *desc = &priv->tx_ring[priv->tx_write];
602 int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
603
604 desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
605 /* maybe I should set whole thing to 0 first... */
606 skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
607 if (skb->len < len)
608 memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
609}
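
A worked example of the short-descriptor header above, for a minimum-size 60-byte (ETH_ZLEN) frame; the payload is right-justified in the 128-byte descriptor, so it starts at byte offset 128 - 60 = 68, i.e. at dt[60] once the 8-byte header is counted:

    /* len = 60:
     *   data_len    = len - 1   = 59   (0x3b, bits 0..15)
     *   data_offset = 128 - len = 68   (0x44, bits 16..22)
     *   header.raw  = METH_TX_CMD_INT_EN | 0x3b | (0x44 << 16) = 0x144003b */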
610#define TX_CATBUF1 BIT(25)
611static void meth_tx_1page_prepare(struct meth_private *priv,
612 struct sk_buff *skb)
613{
614 tx_packet *desc = &priv->tx_ring[priv->tx_write];
615 void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
616 int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
617 int buffer_len = skb->len - unaligned_len;
618 dma_addr_t catbuf;
619
620 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);
621
622 /* unaligned part */
623 if (unaligned_len) {
624 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
625 unaligned_len);
626 desc->header.raw |= (128 - unaligned_len) << 16;
627 }
628
629 /* first page */
630 catbuf = dma_map_single(NULL, buffer_data, buffer_len,
631 DMA_TO_DEVICE);
632 desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
633 desc->data.cat_buf[0].form.len = buffer_len - 1;
634}
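
The round-up above is the usual power-of-two alignment trick; on a hypothetical buffer address it splits the frame like this:

    /* skb->data = 0x1234 (hypothetical):
     *   buffer_data   = (0x1234 + 7) & ~7 = 0x1238
     *   unaligned_len = 0x1238 - 0x1234   = 4
     * The 4 unaligned bytes are copied into the descriptor's inline
     * data area; the aligned remainder goes to DMA via cat_buf[0]. */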
635#define TX_CATBUF2 BIT(26)
636static void meth_tx_2page_prepare(struct meth_private *priv,
637 struct sk_buff *skb)
638{
639 tx_packet *desc = &priv->tx_ring[priv->tx_write];
640 void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
641 void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
642 int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
643 int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
644 int buffer2_len = skb->len - buffer1_len - unaligned_len;
645 dma_addr_t catbuf1, catbuf2;
646
647 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
648 /* unaligned part */
649 if (unaligned_len) {
650 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
651 unaligned_len);
652 desc->header.raw |= (128 - unaligned_len) << 16;
653 }
654
655 /* first page */
656 catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
657 DMA_TO_DEVICE);
658 desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
659 desc->data.cat_buf[0].form.len = buffer1_len - 1;
660 /* second page */
661 catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
662 DMA_TO_DEVICE);
663 desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
664 desc->data.cat_buf[1].form.len = buffer2_len - 1;
665}
666
667static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
668{
669 /* Remember the skb, so we can free it at interrupt time */
670 priv->tx_skbs[priv->tx_write] = skb;
671 if (skb->len <= 120) {
672 /* Whole packet fits into descriptor */
673 meth_tx_short_prepare(priv, skb);
674 } else if (PAGE_ALIGN((unsigned long)skb->data) !=
675 PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
676 /* Packet crosses page boundary */
677 meth_tx_2page_prepare(priv, skb);
678 } else {
679 /* Packet is in one page */
680 meth_tx_1page_prepare(priv, skb);
681 }
682 priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
683 mace->eth.tx_info = priv->tx_write;
684 priv->tx_count++;
685}
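
Note the write pointer wraps with a bitmask instead of a modulo, which only works because TX_RING_ENTRIES (64) is a power of two:

    /* tx_write = 63 (last slot):
     *   (63 + 1) & (64 - 1) = 64 & 63 = 0, i.e. wrap back to slot 0 */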
686
687/*
688 * Transmit a packet (called by the kernel)
689 */
690static int meth_tx(struct sk_buff *skb, struct net_device *dev)
691{
692 struct meth_private *priv = netdev_priv(dev);
693 unsigned long flags;
694
695 spin_lock_irqsave(&priv->meth_lock, flags);
696 /* Stop DMA notification */
697 priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
698 mace->eth.dma_ctrl = priv->dma_ctrl;
699
700 meth_add_to_tx_ring(priv, skb);
701 dev->trans_start = jiffies; /* save the timestamp */
702
703 /* If TX ring is full, tell the upper layer to stop sending packets */
704 if (meth_tx_full(dev)) {
705 printk(KERN_DEBUG "TX full: stopping\n");
706 netif_stop_queue(dev);
707 }
708
709 /* Restart DMA notification */
710 priv->dma_ctrl |= METH_DMA_TX_INT_EN;
711 mace->eth.dma_ctrl = priv->dma_ctrl;
712
713 spin_unlock_irqrestore(&priv->meth_lock, flags);
714
715 return NETDEV_TX_OK;
716}
717
718/*
719 * Deal with a transmit timeout.
720 */
721static void meth_tx_timeout(struct net_device *dev)
722{
723 struct meth_private *priv = netdev_priv(dev);
724 unsigned long flags;
725
726 printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
727
728 /* Protect against concurrent rx interrupts */
729 spin_lock_irqsave(&priv->meth_lock,flags);
730
731 /* Try to reset the interface. */
732 meth_reset(dev);
733
734 dev->stats.tx_errors++;
735
736 /* Clear all rings */
737 meth_free_tx_ring(priv);
738 meth_free_rx_ring(priv);
739 meth_init_tx_ring(priv);
740 meth_init_rx_ring(priv);
741
742 /* Restart dma */
743 priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
744 mace->eth.dma_ctrl = priv->dma_ctrl;
745
746 /* Enable interrupt */
747 spin_unlock_irqrestore(&priv->meth_lock, flags);
748
749 dev->trans_start = jiffies; /* prevent tx timeout */
750 netif_wake_queue(dev);
751}
752
753/*
754 * Ioctl commands
755 */
756static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
757{
758 /* XXX Not yet implemented */
759 switch (cmd) {
760 case SIOCGMIIPHY:
761 case SIOCGMIIREG:
762 case SIOCSMIIREG:
763 default:
764 return -EOPNOTSUPP;
765 }
766}
767
768static const struct net_device_ops meth_netdev_ops = {
769 .ndo_open = meth_open,
770 .ndo_stop = meth_release,
771 .ndo_start_xmit = meth_tx,
772 .ndo_do_ioctl = meth_ioctl,
773 .ndo_tx_timeout = meth_tx_timeout,
774 .ndo_change_mtu = eth_change_mtu,
775 .ndo_validate_addr = eth_validate_addr,
776 .ndo_set_mac_address = eth_mac_addr,
777};
778
779/*
780 * The init function.
781 */
782static int __devinit meth_probe(struct platform_device *pdev)
783{
784 struct net_device *dev;
785 struct meth_private *priv;
786 int err;
787
788 dev = alloc_etherdev(sizeof(struct meth_private));
789 if (!dev)
790 return -ENOMEM;
791
792 dev->netdev_ops = &meth_netdev_ops;
793 dev->watchdog_timeo = timeout;
794 dev->irq = MACE_ETHERNET_IRQ;
795 dev->base_addr = (unsigned long)&mace->eth;
796 memcpy(dev->dev_addr, o2meth_eaddr, 6);
797
798 priv = netdev_priv(dev);
799 spin_lock_init(&priv->meth_lock);
800 SET_NETDEV_DEV(dev, &pdev->dev);
801
802 err = register_netdev(dev);
803 if (err) {
804 free_netdev(dev);
805 return err;
806 }
807
808 printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
809 dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
810 return 0;
811}
812
813static int __exit meth_remove(struct platform_device *pdev)
814{
815 struct net_device *dev = platform_get_drvdata(pdev);
816
817 unregister_netdev(dev);
818 free_netdev(dev);
819 platform_set_drvdata(pdev, NULL);
820
821 return 0;
822}
823
824static struct platform_driver meth_driver = {
825 .probe = meth_probe,
826 .remove = __exit_p(meth_remove),
827 .driver = {
828 .name = "meth",
829 .owner = THIS_MODULE,
830 }
831};
832
833static int __init meth_init_module(void)
834{
835 int err;
836
837 err = platform_driver_register(&meth_driver);
838 if (err)
839 printk(KERN_ERR "Driver registration failed\n");
840
841 return err;
842}
843
844static void __exit meth_exit_module(void)
845{
846 platform_driver_unregister(&meth_driver);
847}
848
849module_init(meth_init_module);
850module_exit(meth_exit_module);
851
852MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
853MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
854MODULE_LICENSE("GPL");
855MODULE_ALIAS("platform:meth");
diff --git a/drivers/net/ethernet/sgi/meth.h b/drivers/net/ethernet/sgi/meth.h
new file mode 100644
index 000000000000..5b145c6bad60
--- /dev/null
+++ b/drivers/net/ethernet/sgi/meth.h
@@ -0,0 +1,243 @@
1
2/*
3 * snull.h -- definitions for the network module
4 *
5 * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
6 * Copyright (C) 2001 O'Reilly & Associates
7 *
8 * The source code in this file can be freely used, adapted,
9 * and redistributed in source or binary form, so long as an
10 * acknowledgment appears in derived source files. The citation
11 * should list that the code comes from the book "Linux Device
12 * Drivers" by Alessandro Rubini and Jonathan Corbet, published
13 * by O'Reilly & Associates. No warranty is attached;
14 * we cannot take responsibility for errors or fitness for use.
15 */
16
17/* version dependencies have been confined to a separate file */
18
19/* Tunable parameters */
20#define TX_RING_ENTRIES 64 /* 64-512? */
21
22#define RX_RING_ENTRIES 16 /* Do not change */
23/* Internal constants */
24#define TX_RING_BUFFER_SIZE (TX_RING_ENTRIES*sizeof(tx_packet))
25#define RX_BUFFER_SIZE 1546 /* ethernet packet size */
26#define METH_RX_BUFF_SIZE 4096
27#define METH_RX_HEAD 34 /* status + 3 quad garbage-fill + 2 byte zero-pad */
28#define RX_BUFFER_OFFSET (sizeof(rx_status_vector)+2) /* status vector + 2 bytes of padding */
29#define RX_BUCKET_SIZE 256
30
31/* For more detailed explanations of what each field means,
32 see Nick's great comments to the #defines below (or the docs, if
33 you are lucky enough to get hold of them :) */
34
35/* tx status vector is written over tx command header upon
36 dma completion. */
37
38typedef struct tx_status_vector {
39 u64 sent:1; /* always set to 1...*/
40 u64 pad0:34;/* always set to 0 */
41 u64 flags:9; /*I'm too lazy to specify each one separately at the moment*/
42 u64 col_retry_cnt:4; /*collision retry count*/
43 u64 len:16; /*Transmit length in bytes*/
44} tx_status_vector;
45
46/*
47 * Each packet is 128 bytes long.
48 * It consists of a header, 0-3 concatenation
49 * buffer pointers, and up to 120 data bytes.
50 */
51typedef struct tx_packet_hdr {
52 u64 pad1:36; /*should be filled with 0 */
53 u64 cat_ptr3_valid:1, /* Concatenation pointer valid flags */
54 cat_ptr2_valid:1,
55 cat_ptr1_valid:1;
56 u64 tx_int_flag:1; /* Generate a TX interrupt when the packet has been sent */
57 u64 term_dma_flag:1; /*Terminate transmit DMA on transmit abort conditions*/
58 u64 data_offset:7; /*Starting byte offset in ring data block*/
59 u64 data_len:16; /*Length of valid data in bytes-1*/
60} tx_packet_hdr;
61typedef union tx_cat_ptr {
62 struct {
63 u64 pad2:16; /* should be 0 */
64 u64 len:16; /*length of buffer data - 1*/
65 u64 start_addr:29; /*Physical starting address*/
66 u64 pad1:3; /* should be zero */
67 } form;
68 u64 raw;
69} tx_cat_ptr;
70
71typedef struct tx_packet {
72 union {
73 tx_packet_hdr header;
74 tx_status_vector res;
75 u64 raw;
76 } header;
77 union {
78 tx_cat_ptr cat_buf[3];
79 char dt[120];
80 } data;
81} tx_packet;
82
83typedef union rx_status_vector {
84 volatile struct {
85 u64 pad1:1;/*fill it with ones*/
86 u64 pad2:15;/*fill with 0*/
87 u64 ip_chk_sum:16;
88 u64 seq_num:5;
89 u64 mac_addr_match:1;
90 u64 mcast_addr_match:1;
91 u64 carrier_event_seen:1;
92 u64 bad_packet:1;
93 u64 long_event_seen:1;
94 u64 invalid_preamble:1;
95 u64 broadcast:1;
96 u64 multicast:1;
97 u64 crc_error:1;
98 u64 huh:1;/*???*/
99 u64 rx_code_violation:1;
100 u64 rx_len:16;
101 } parsed;
102 volatile u64 raw;
103} rx_status_vector;
104
105typedef struct rx_packet {
106 rx_status_vector status;
107 u64 pad[3]; /* For whatever reason, the data needs to sit at a 4 double-word offset */
108 u16 pad2;
109 char buf[METH_RX_BUFF_SIZE-sizeof(rx_status_vector)-3*sizeof(u64)-sizeof(u16)];/* data */
110} rx_packet;
111
112#define TX_INFO_RPTR 0x00FF0000
113#define TX_INFO_WPTR 0x000000FF
114
115 /* Bits in METH_MAC */
116
117#define SGI_MAC_RESET BIT(0) /* 0: MAC110 active in run mode, 1: Global reset signal to MAC110 core is active */
118#define METH_PHY_FDX BIT(1) /* 0: Disable full duplex, 1: Enable full duplex */
119#define METH_PHY_LOOP BIT(2) /* 0: Normal operation, follows 10/100mbit and M10T/MII select, 1: loops internal MII bus */
120 /* selects ignored */
121#define METH_100MBIT BIT(3) /* 0: 10meg mode, 1: 100meg mode */
122#define METH_PHY_MII BIT(4) /* 0: MII selected, 1: SIA selected */
123 /* Note: when loopback is set this bit becomes collision control. Setting this bit will */
124 /* cause a collision to be reported. */
125
126 /* Bits 5 and 6 are used to determine the Destination address filter mode */
127#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
128#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
129#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
130#define METH_PROMISC 0x60 /* 11: Promiscuous mode */
131
132#define METH_PHY_LINK_FAIL BIT(7) /* 0: Link failure detection disabled, 1: Hardware scans for link failure in PHY */
133
134#define METH_MAC_IPG 0x1ffff00
135
136#define METH_DEFAULT_IPG ((17<<15) | (11<<22) | (21<<8))
137 /* 0x172e5c00 (23, 23, 23); 0x54A9500 (21, 21, 21) */
138 /* Bits 8 through 14 are used to determine Inter-Packet Gap between "Back to Back" packets */
139 /* The gap depends on the clock speed of the link, 80ns per increment for 100baseT, 800ns */
140 /* per increment for 10BaseT */
141
142 /* Bits 15 through 21 are used to determine IPGR1 */
143
144 /* Bits 22 through 28 are used to determine IPGR2 */
145
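
Expanding METH_DEFAULT_IPG with the field positions above:

    /* (17 << 15) = 0x0088000   IPGR1 = 17
     * (11 << 22) = 0x2C00000   IPGR2 = 11
     * (21 <<  8) = 0x0001500   IPG   = 21
     *              ---------
     *              0x2C89500 = METH_DEFAULT_IPG */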
146#define METH_REV_SHIFT 29 /* Bits 29 through 31 are used to determine the revision */
147 /* 000: Initial revision */
148 /* 001: First revision, Improved TX concatenation */
149
150
151/* DMA control bits */
152#define METH_RX_OFFSET_SHIFT 12 /* Bits 12:14 of DMA control register indicate starting offset of packet data for RX operation */
153#define METH_RX_DEPTH_SHIFT 4 /* Bits 8:4 define RX fifo depth -- when # of RX fifo entries != depth, an interrupt is generated */
154
155#define METH_DMA_TX_EN BIT(1) /* enable TX DMA */
156#define METH_DMA_TX_INT_EN BIT(0) /* enable TX Buffer Empty interrupt */
157#define METH_DMA_RX_EN BIT(15) /* Enable RX */
158#define METH_DMA_RX_INT_EN BIT(9) /* Enable interrupt on RX packet */
159
160/* RX FIFO MCL Info bits */
161#define METH_RX_FIFO_WPTR(x) (((x)>>16)&0xf)
162#define METH_RX_FIFO_RPTR(x) (((x)>>8)&0xf)
163#define METH_RX_FIFO_DEPTH(x) ((x)&0x1f)
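
For example, a hypothetical MCL info word of 0x00030405 decodes as:

    /* METH_RX_FIFO_WPTR(0x00030405)  = (0x00030405 >> 16) & 0xf = 3
     * METH_RX_FIFO_RPTR(0x00030405)  = (0x00030405 >> 8)  & 0xf = 4
     * METH_RX_FIFO_DEPTH(0x00030405) = 0x00030405 & 0x1f        = 5 */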
164
165/* RX status bits */
166
167#define METH_RX_ST_VALID BIT(63)
168#define METH_RX_ST_RCV_CODE_VIOLATION BIT(16)
169#define METH_RX_ST_DRBL_NBL BIT(17)
170#define METH_RX_ST_CRC_ERR BIT(18)
171#define METH_RX_ST_MCAST_PKT BIT(19)
172#define METH_RX_ST_BCAST_PKT BIT(20)
173#define METH_RX_ST_INV_PREAMBLE_CTX BIT(21)
174#define METH_RX_ST_LONG_EVT_SEEN BIT(22)
175#define METH_RX_ST_BAD_PACKET BIT(23)
176#define METH_RX_ST_CARRIER_EVT_SEEN BIT(24)
177#define METH_RX_ST_MCAST_FILTER_MATCH BIT(25)
178#define METH_RX_ST_PHYS_ADDR_MATCH BIT(26)
179
180#define METH_RX_STATUS_ERRORS \
181 ( \
182 METH_RX_ST_RCV_CODE_VIOLATION| \
183 METH_RX_ST_CRC_ERR| \
184 METH_RX_ST_INV_PREAMBLE_CTX| \
185 METH_RX_ST_LONG_EVT_SEEN| \
186 METH_RX_ST_BAD_PACKET| \
187 METH_RX_ST_CARRIER_EVT_SEEN \
188 )
189 /* Bits in METH_INT */
190 /* Write _1_ to corresponding bit to clear */
191#define METH_INT_TX_EMPTY BIT(0) /* 0: No interrupt pending, 1: The TX ring buffer is empty */
192#define METH_INT_TX_PKT BIT(1) /* 0: No interrupt pending */
193 /* 1: A TX message had the INT request bit set, the packet has been sent. */
194#define METH_INT_TX_LINK_FAIL BIT(2) /* 0: No interrupt pending, 1: PHY has reported a link failure */
195#define METH_INT_MEM_ERROR BIT(3) /* 0: No interrupt pending */
196 /* 1: A memory error occurred during DMA, DMA stopped, Fatal */
197#define METH_INT_TX_ABORT BIT(4) /* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */
198#define METH_INT_RX_THRESHOLD BIT(5) /* 0: No interrupt pending, 1: Selected receive threshold condition Valid */
199#define METH_INT_RX_UNDERFLOW BIT(6) /* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */
200#define METH_INT_RX_OVERFLOW BIT(7) /* 0: No interrupt pending, 1: DMA FIFO Overflow, DMA stopped, FATAL */
201
202/*#define METH_INT_RX_RPTR_MASK 0x0001F00*/ /* Bits 8 through 12 alias of RX read-pointer */
203#define METH_INT_RX_RPTR_MASK 0x0000F00 /* Bits 8 through 11 alias of RX read-pointer - so, is Rx FIFO 16 or 32 entry?*/
204
205 /* Bits 13 through 15 are always 0. */
206
207#define METH_INT_TX_RPTR_MASK 0x1FF0000 /* Bits 16 through 24 alias of TX read-pointer */
208
209#define METH_INT_RX_SEQ_MASK 0x3E000000 /* Bits 25 through 29 are the starting seq number for the */
210
211 /* message at the top of the queue */
212
213#define METH_INT_ERROR (METH_INT_TX_LINK_FAIL| \
214 METH_INT_MEM_ERROR| \
215 METH_INT_TX_ABORT| \
216 METH_INT_RX_OVERFLOW| \
217 METH_INT_RX_UNDERFLOW)
218
219#define METH_INT_MCAST_HASH BIT(30) /* If RX DMA is enabled the hash select logic output is latched here */
220
221/* TX status bits */
222#define METH_TX_ST_DONE BIT(63) /* TX complete */
223#define METH_TX_ST_SUCCESS BIT(23) /* Packet was transmitted successfully */
224#define METH_TX_ST_TOOLONG BIT(24) /* TX abort due to excessive length */
225#define METH_TX_ST_UNDERRUN BIT(25) /* TX abort due to underrun (?) */
226#define METH_TX_ST_EXCCOLL BIT(26) /* TX abort due to excess collisions */
227#define METH_TX_ST_DEFER BIT(27) /* TX abort due to excess deferrals */
228#define METH_TX_ST_LATECOLL BIT(28) /* TX abort due to late collision */
229
230
231/* Tx command header bits */
232#define METH_TX_CMD_INT_EN BIT(24) /* Generate TX interrupt when packet is sent */
233
234/* Phy MDIO interface busy flag */
235#define MDIO_BUSY BIT(16)
236#define MDIO_DATA_MASK 0xFFFF
237/* PHY defines */
238#define PHY_QS6612X 0x0181441 /* Quality TX */
239#define PHY_ICS1889 0x0015F41 /* ICS FX */
240#define PHY_ICS1890 0x0015F42 /* ICS TX */
241#define PHY_DP83840 0x20005C0 /* National TX */
242
243#define ADVANCE_RX_PTR(x) ((x) = ((x) + 1) & (RX_RING_ENTRIES - 1))