path: root/drivers/net/tsi108_eth.c
author	Zang Roy-r61911 <tie-fei.zang@freescale.com>	2006-11-08 22:49:13 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-12-02 00:12:03 -0500
commit	5e123b844a1cbd4ec258cd37847ce4d57fa308c1 (patch)
tree	82400847ed495ecac9521406d34fcdbdcbf15213 /drivers/net/tsi108_eth.c
parent	31f817e9d6f325b10a316bb84237cae3739487ed (diff)
[PATCH] Add tsi108/9 On Chip Ethernet device driver support
Add tsi108/9 on-chip Ethernet controller driver support. The driver code incorporates the feedback from previous postings on the mailing list. The MPC7448HPC2 platform in arch/powerpc uses the tsi108 bridge.

A brief description of the Ethernet controller follows.

The Tsi108/9 Ethernet Controller connects the Switch Fabric to two independent Gigabit Ethernet ports, E0 and E1. It uses a single management interface to manage the two physical connection devices (PHYs). Each Ethernet port has its own statistics monitor that tracks and reports key interface statistics, and each port supports a 256-entry hash table for address filtering. In addition, each port is bridged to the Switch Fabric through a 2-Kbyte transmit FIFO and a 4-Kbyte receive FIFO.

Each Ethernet port also has a pair of internal Ethernet DMA channels to support the transmit and receive data flows. The Ethernet DMA channels use descriptors set up in memory and access that memory via the Switch Fabric. The Ethernet Controller's DMA arbiter handles arbitration for the Switch Fabric, and the controller also has a register bus interface for register accesses and status monitor control.

The PMD (Physical Media Device) interface operates in MII, GMII, or TBI mode. MII mode is used for connecting with 10 or 100 Mbit/s PMDs; the GMII and TBI modes are used to connect with Gigabit PMDs.

Internal data flows to and from the Ethernet Controller through the Switch Fabric. Each Ethernet port uses its transmit and receive DMA channels to manage data flows through buffer descriptors that are predefined by the system (the descriptors can exist anywhere in the system memory map). These descriptors are data structures that point to buffers filled with data ready to transmit over Ethernet, or to empty buffers ready to receive data from Ethernet.

Signed-off-by: Alexandre Bounine <Alexandre.Bounine@tundra.com>
Signed-off-by: Roy Zang <tie-fei.zang@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
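Since tsi108_eth.h is not part of this diff, here is a rough sketch of what one of these buffer descriptors might look like. The field names (buf0, next0, vlan, blen, len, misc) are inferred from how the driver below uses them; the widths and ordering are guesses, not the real header:

	/* Hypothetical reconstruction of a receive descriptor; the real
	 * definition lives in drivers/net/tsi108_eth.h. */
	typedef struct rx_desc {
		u32 buf0;	/* DMA address of the data buffer */
		u32 next0;	/* DMA address of the next descriptor in the ring */
		u16 vlan;	/* VLAN tag of the received frame */
		u16 blen;	/* buffer size, set by the driver */
		u16 len;	/* received length, written back by hardware */
		u16 misc;	/* ownership/status bits, e.g. TSI108_RX_OWN,
				 * TSI108_RX_OK, TSI108_RX_INT */
	} rx_desc;

The transmit descriptors are used the same way (buf0, len, misc, next0): the driver sets TSI108_TX_OWN to hand a descriptor to hardware and checks TSI108_TX_OK and TSI108_TX_EOF on completion.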
Diffstat (limited to 'drivers/net/tsi108_eth.c')
-rw-r--r--	drivers/net/tsi108_eth.c	1708
1 file changed, 1708 insertions, 0 deletions
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 000000000000..893808ab3742
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1708 @@
1/*******************************************************************************
2
3 Copyright(c) 2006 Tundra Semiconductor Corporation.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of the GNU General Public License as published by the Free
7 Software Foundation; either version 2 of the License, or (at your option)
8 any later version.
9
10 This program is distributed in the hope that it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 59
17 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18
19*******************************************************************************/
20
21/* This driver is based on the driver code originally developed
22 * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
23 * scott.wood@timesys.com, Copyright (C) 2003 TimeSys Corporation
24 *
25 * Currently changes from original version are:
26 * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
27 * - modifications to handle two ports independently and support for
28 * additional PHY devices (alexandre.bounine@tundra.com)
29 * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
30 *
31 */
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/init.h>
36#include <linux/net.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/skbuff.h>
40#include <linux/slab.h>
41#include <linux/sched.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44#include <linux/crc32.h>
45#include <linux/mii.h>
46#include <linux/device.h>
47#include <linux/pci.h>
48#include <linux/rtnetlink.h>
49#include <linux/timer.h>
50#include <linux/platform_device.h>
51
52
53#include <asm/system.h>
54#include <asm/io.h>
55#include <asm/tsi108.h>
56
57#include "tsi108_eth.h"
58
59#define MII_READ_DELAY 10000 /* max link wait time in msec */
60
61#define TSI108_RXRING_LEN 256
62
63/* NOTE: The driver currently does not support receiving packets
64 * larger than the buffer size, so don't decrease this (unless you
65 * want to add such support).
66 */
67#define TSI108_RXBUF_SIZE 1536
68
69#define TSI108_TXRING_LEN 256
70
71#define TSI108_TX_INT_FREQ 64
72
73/* Check the phy status every half a second. */
74#define CHECK_PHY_INTERVAL (HZ/2)
75
76static int tsi108_init_one(struct platform_device *pdev);
77static int tsi108_ether_remove(struct platform_device *pdev);
78
79struct tsi108_prv_data {
80 void __iomem *regs; /* Base of normal regs */
81 void __iomem *phyregs; /* Base of register bank used for PHY access */
82
83 unsigned int phy; /* Index of PHY for this interface */
84 unsigned int irq_num;
85 unsigned int id;
86
87 struct timer_list timer;/* Timer that triggers the check phy function */
88 unsigned int rxtail; /* Next entry in rxring to read */
89 unsigned int rxhead; /* Next entry in rxring to give a new buffer */
90 unsigned int rxfree; /* Number of free, allocated RX buffers */
91
92 unsigned int rxpending; /* Non-zero if there are still descriptors
93 * to be processed from a previous descriptor
94 * interrupt condition that has been cleared */
95
96 unsigned int txtail; /* Next TX descriptor to check status on */
97 unsigned int txhead; /* Next TX descriptor to use */
98
99	/* Number of free TX descriptors.  This could be calculated from
100	 * txhead and txtail if one descriptor were left unused to disambiguate
101	 * full and empty conditions, but it's simpler to just keep track
102	 * explicitly. */
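	/* (Illustration: with txhead == txtail, an occupancy computed as
	 * (txhead - txtail) % TSI108_TXRING_LEN is 0 both when the ring
	 * is empty and when all 256 descriptors are outstanding; the
	 * usual alternative is to cap occupancy at LEN - 1, which wastes
	 * a slot.) */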
103
104 unsigned int txfree;
105
106 unsigned int phy_ok; /* The PHY is currently powered on. */
107
108 /* PHY status (duplex is 1 for half, 2 for full,
109 * so that the default 0 indicates that neither has
110 * yet been configured). */
111
112 unsigned int link_up;
113 unsigned int speed;
114 unsigned int duplex;
115
116 tx_desc *txring;
117 rx_desc *rxring;
118 struct sk_buff *txskbs[TSI108_TXRING_LEN];
119 struct sk_buff *rxskbs[TSI108_RXRING_LEN];
120
121 dma_addr_t txdma, rxdma;
122
123 /* txlock nests in misclock and phy_lock */
124
125 spinlock_t txlock, misclock;
126
127 /* stats is used to hold the upper bits of each hardware counter,
128 * and tmpstats is used to hold the full values for returning
129 * to the caller of get_stats(). They must be separate in case
130 * an overflow interrupt occurs before the stats are consumed.
131 */
132
133 struct net_device_stats stats;
134 struct net_device_stats tmpstats;
135
136 /* These stats are kept separate in hardware, thus require individual
137 * fields for handling carry. They are combined in get_stats.
138 */
139
140 unsigned long rx_fcs; /* Add to rx_frame_errors */
141 unsigned long rx_short_fcs; /* Add to rx_frame_errors */
142 unsigned long rx_long_fcs; /* Add to rx_frame_errors */
143 unsigned long rx_underruns; /* Add to rx_length_errors */
144 unsigned long rx_overruns; /* Add to rx_length_errors */
145
146 unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */
147 unsigned long tx_pause_drop; /* Add to tx_aborted_errors */
148
149 unsigned long mc_hash[16];
150 u32 msg_enable; /* debug message level */
151 struct mii_if_info mii_if;
152 unsigned int init_media;
153};
154
155/* Structure for a device driver */
156
157static struct platform_driver tsi_eth_driver = {
158 .probe = tsi108_init_one,
159 .remove = tsi108_ether_remove,
160 .driver = {
161 .name = "tsi-ethernet",
162 },
163};
164
165static void tsi108_timed_checker(unsigned long dev_ptr);
166
167static void dump_eth_one(struct net_device *dev)
168{
169 struct tsi108_prv_data *data = netdev_priv(dev);
170
171 printk("Dumping %s...\n", dev->name);
172 printk("intstat %x intmask %x phy_ok %d"
173 " link %d speed %d duplex %d\n",
174 TSI_READ(TSI108_EC_INTSTAT),
175 TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
176 data->link_up, data->speed, data->duplex);
177
178 printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
179 data->txhead, data->txtail, data->txfree,
180 TSI_READ(TSI108_EC_TXSTAT),
181 TSI_READ(TSI108_EC_TXESTAT),
182 TSI_READ(TSI108_EC_TXERR));
183
184 printk("RX: head %d, tail %d, free %d, stat %x,"
185 " estat %x, err %x, pending %d\n\n",
186 data->rxhead, data->rxtail, data->rxfree,
187 TSI_READ(TSI108_EC_RXSTAT),
188 TSI_READ(TSI108_EC_RXESTAT),
189 TSI_READ(TSI108_EC_RXERR), data->rxpending);
190}
191
192/* Synchronization is needed between the thread and up/down events.
193 * Note that the PHY is accessed through the same registers for both
194 * interfaces, so this can't be made interface-specific.
195 */
196
197static DEFINE_SPINLOCK(phy_lock);
198
199static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
200{
201 unsigned i;
202
203 TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
204 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
205 (reg << TSI108_MAC_MII_ADDR_REG));
206 TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
207 TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
208 for (i = 0; i < 100; i++) {
209 if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
210 (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
211 break;
212 udelay(10);
213 }
214
215 if (i == 100)
216 return 0xffff;
217 else
218 return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
219}
220
221static void tsi108_write_mii(struct tsi108_prv_data *data,
222 int reg, u16 val)
223{
224 unsigned i = 100;
225 TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
226 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
227 (reg << TSI108_MAC_MII_ADDR_REG));
228 TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
229 while (i--) {
230 if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
231 TSI108_MAC_MII_IND_BUSY))
232 break;
233 udelay(10);
234 }
235}
236
237static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
238{
239 struct tsi108_prv_data *data = netdev_priv(dev);
240 return tsi108_read_mii(data, reg);
241}
242
243static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
244{
245 struct tsi108_prv_data *data = netdev_priv(dev);
246 tsi108_write_mii(data, reg, val);
247}
248
249static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
250 int reg, u16 val)
251{
252 unsigned i = 1000;
253 TSI_WRITE(TSI108_MAC_MII_ADDR,
254 (0x1e << TSI108_MAC_MII_ADDR_PHY)
255 | (reg << TSI108_MAC_MII_ADDR_REG));
256 TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
257 while(i--) {
258 if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
259 return;
260 udelay(10);
261 }
262	printk(KERN_ERR "%s: function timed out\n", __FUNCTION__);
263}
264
265static int mii_speed(struct mii_if_info *mii)
266{
267 int advert, lpa, val, media;
268 int lpa2 = 0;
269 int speed;
270
271 if (!mii_link_ok(mii))
272 return 0;
273
274 val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
275 if ((val & BMSR_ANEGCOMPLETE) == 0)
276 return 0;
277
278 advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
279 lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
280 media = mii_nway_result(advert & lpa);
281
282 if (mii->supports_gmii)
283 lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
284
285 speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
286 (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
287 return speed;
288}
289
290static void tsi108_check_phy(struct net_device *dev)
291{
292 struct tsi108_prv_data *data = netdev_priv(dev);
293 u32 mac_cfg2_reg, portctrl_reg;
294 u32 duplex;
295 u32 speed;
296 unsigned long flags;
297
298 /* Do a dummy read, as for some reason the first read
299 * after a link becomes up returns link down, even if
300 * it's been a while since the link came up.
301 */
302
303 spin_lock_irqsave(&phy_lock, flags);
304
305 if (!data->phy_ok)
306 goto out;
307
308 tsi108_read_mii(data, MII_BMSR);
309
310 duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
311 data->init_media = 0;
312
313 if (netif_carrier_ok(dev)) {
314
315 speed = mii_speed(&data->mii_if);
316
317 if ((speed != data->speed) || duplex) {
318
319 mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
320 portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
321
322 mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
323
324 if (speed == 1000) {
325 mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
326 portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
327 } else {
328 mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
329 portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
330 }
331
332 data->speed = speed;
333
334 if (data->mii_if.full_duplex) {
335 mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
336 portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
337 data->duplex = 2;
338 } else {
339 mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
340 portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
341 data->duplex = 1;
342 }
343
344 TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
345 TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
346
347 if (data->link_up == 0) {
348 /* The manual says it can take 3-4 usecs for the speed change
349 * to take effect.
350 */
351 udelay(5);
352
353 spin_lock(&data->txlock);
354 if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
355 netif_wake_queue(dev);
356
357 data->link_up = 1;
358 spin_unlock(&data->txlock);
359 }
360 }
361
362 } else {
363 if (data->link_up == 1) {
364 netif_stop_queue(dev);
365 data->link_up = 0;
366			printk(KERN_NOTICE "%s: link is down\n", dev->name);
367 }
368
369 goto out;
370 }
371
372
373out:
374 spin_unlock_irqrestore(&phy_lock, flags);
375}
376
377static inline void
378tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
379 unsigned long *upper)
380{
381 if (carry & carry_bit)
382 *upper += carry_shift;
383}
384
385static void tsi108_stat_carry(struct net_device *dev)
386{
387 struct tsi108_prv_data *data = netdev_priv(dev);
388 u32 carry1, carry2;
389
390 spin_lock_irq(&data->misclock);
391
392 carry1 = TSI_READ(TSI108_STAT_CARRY1);
393 carry2 = TSI_READ(TSI108_STAT_CARRY2);
394
395 TSI_WRITE(TSI108_STAT_CARRY1, carry1);
396 TSI_WRITE(TSI108_STAT_CARRY2, carry2);
397
398 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
399 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
400
401 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
402 TSI108_STAT_RXPKTS_CARRY,
403 &data->stats.rx_packets);
404
405 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
406 TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
407
408 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
409 TSI108_STAT_RXMCAST_CARRY,
410 &data->stats.multicast);
411
412 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
413 TSI108_STAT_RXALIGN_CARRY,
414 &data->stats.rx_frame_errors);
415
416 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
417 TSI108_STAT_RXLENGTH_CARRY,
418 &data->stats.rx_length_errors);
419
420 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
421 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
422
423 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
424 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
425
426 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
427 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
428
429 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
430 TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
431
432 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
433 TSI108_STAT_RXDROP_CARRY,
434 &data->stats.rx_missed_errors);
435
436 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
437 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
438
439 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
440 TSI108_STAT_TXPKTS_CARRY,
441 &data->stats.tx_packets);
442
443 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
444 TSI108_STAT_TXEXDEF_CARRY,
445 &data->stats.tx_aborted_errors);
446
447 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
448 TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
449
450 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
451 TSI108_STAT_TXTCOL_CARRY,
452 &data->stats.collisions);
453
454 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
455 TSI108_STAT_TXPAUSEDROP_CARRY,
456 &data->tx_pause_drop);
457
458 spin_unlock_irq(&data->misclock);
459}
460
461/* Read a stat counter atomically with respect to carries.
462 * data->misclock must be held.
463 */
464static inline unsigned long
465tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
466 int carry_shift, unsigned long *upper)
467{
468 int carryreg;
469 unsigned long val;
470
471 if (reg < 0xb0)
472 carryreg = TSI108_STAT_CARRY1;
473 else
474 carryreg = TSI108_STAT_CARRY2;
475
476 again:
477 val = TSI_READ(reg) | *upper;
478
479 /* Check to see if it overflowed, but the interrupt hasn't
480 * been serviced yet. If so, handle the carry here, and
481 * try again.
482 */
483
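	/* Worked example, assuming a 24-bit hardware counter so that
	 * carry_shift == 1 << 24 (the real widths come from tsi108_eth.h):
	 * after two carries *upper == 0x02000000, and a raw register
	 * reading of 0x001234 composes to 0x02001234 via the OR above,
	 * since the low and high parts occupy disjoint bits. */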
484 if (unlikely(TSI_READ(carryreg) & carry_bit)) {
485 *upper += carry_shift;
486 TSI_WRITE(carryreg, carry_bit);
487 goto again;
488 }
489
490 return val;
491}
492
493static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
494{
495 unsigned long excol;
496
497 struct tsi108_prv_data *data = netdev_priv(dev);
498 spin_lock_irq(&data->misclock);
499
500 data->tmpstats.rx_packets =
501 tsi108_read_stat(data, TSI108_STAT_RXPKTS,
502 TSI108_STAT_CARRY1_RXPKTS,
503 TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
504
505 data->tmpstats.tx_packets =
506 tsi108_read_stat(data, TSI108_STAT_TXPKTS,
507 TSI108_STAT_CARRY2_TXPKTS,
508 TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
509
510 data->tmpstats.rx_bytes =
511 tsi108_read_stat(data, TSI108_STAT_RXBYTES,
512 TSI108_STAT_CARRY1_RXBYTES,
513 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
514
515 data->tmpstats.tx_bytes =
516 tsi108_read_stat(data, TSI108_STAT_TXBYTES,
517 TSI108_STAT_CARRY2_TXBYTES,
518 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
519
520 data->tmpstats.multicast =
521 tsi108_read_stat(data, TSI108_STAT_RXMCAST,
522 TSI108_STAT_CARRY1_RXMCAST,
523 TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
524
525 excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
526 TSI108_STAT_CARRY2_TXEXCOL,
527 TSI108_STAT_TXEXCOL_CARRY,
528 &data->tx_coll_abort);
529
530 data->tmpstats.collisions =
531 tsi108_read_stat(data, TSI108_STAT_TXTCOL,
532 TSI108_STAT_CARRY2_TXTCOL,
533 TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
534
535 data->tmpstats.collisions += excol;
536
537 data->tmpstats.rx_length_errors =
538 tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
539 TSI108_STAT_CARRY1_RXLENGTH,
540 TSI108_STAT_RXLENGTH_CARRY,
541 &data->stats.rx_length_errors);
542
543 data->tmpstats.rx_length_errors +=
544 tsi108_read_stat(data, TSI108_STAT_RXRUNT,
545 TSI108_STAT_CARRY1_RXRUNT,
546 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
547
548 data->tmpstats.rx_length_errors +=
549 tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
550 TSI108_STAT_CARRY1_RXJUMBO,
551 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
552
553 data->tmpstats.rx_frame_errors =
554 tsi108_read_stat(data, TSI108_STAT_RXALIGN,
555 TSI108_STAT_CARRY1_RXALIGN,
556 TSI108_STAT_RXALIGN_CARRY,
557 &data->stats.rx_frame_errors);
558
559 data->tmpstats.rx_frame_errors +=
560 tsi108_read_stat(data, TSI108_STAT_RXFCS,
561 TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
562 &data->rx_fcs);
563
564 data->tmpstats.rx_frame_errors +=
565 tsi108_read_stat(data, TSI108_STAT_RXFRAG,
566 TSI108_STAT_CARRY1_RXFRAG,
567 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
568
569 data->tmpstats.rx_missed_errors =
570 tsi108_read_stat(data, TSI108_STAT_RXDROP,
571 TSI108_STAT_CARRY1_RXDROP,
572 TSI108_STAT_RXDROP_CARRY,
573 &data->stats.rx_missed_errors);
574
575 /* These three are maintained by software. */
576 data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
577 data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
578
579 data->tmpstats.tx_aborted_errors =
580 tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
581 TSI108_STAT_CARRY2_TXEXDEF,
582 TSI108_STAT_TXEXDEF_CARRY,
583 &data->stats.tx_aborted_errors);
584
585 data->tmpstats.tx_aborted_errors +=
586 tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
587 TSI108_STAT_CARRY2_TXPAUSE,
588 TSI108_STAT_TXPAUSEDROP_CARRY,
589 &data->tx_pause_drop);
590
591 data->tmpstats.tx_aborted_errors += excol;
592
593 data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
594 data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
595 data->tmpstats.rx_crc_errors +
596 data->tmpstats.rx_frame_errors +
597 data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
598
599 spin_unlock_irq(&data->misclock);
600 return &data->tmpstats;
601}
602
603static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
604{
605 TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
606 TSI108_EC_RXQ_PTRHIGH_VALID);
607
608 TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
609 | TSI108_EC_RXCTRL_QUEUE0);
610}
611
612static void tsi108_restart_tx(struct tsi108_prv_data * data)
613{
614 TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
615 TSI108_EC_TXQ_PTRHIGH_VALID);
616
617 TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
618 TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
619}
620
621/* txlock must be held by caller, with IRQs disabled, and
622 * with permission to re-enable them when the lock is dropped.
623 */
624static void tsi108_complete_tx(struct net_device *dev)
625{
626 struct tsi108_prv_data *data = netdev_priv(dev);
627 int tx;
628 struct sk_buff *skb;
629 int release = 0;
630
631 while (!data->txfree || data->txhead != data->txtail) {
632 tx = data->txtail;
633
634 if (data->txring[tx].misc & TSI108_TX_OWN)
635 break;
636
637 skb = data->txskbs[tx];
638
639 if (!(data->txring[tx].misc & TSI108_TX_OK))
640 printk("%s: bad tx packet, misc %x\n",
641 dev->name, data->txring[tx].misc);
642
643 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
644 data->txfree++;
645
646 if (data->txring[tx].misc & TSI108_TX_EOF) {
647 dev_kfree_skb_any(skb);
648 release++;
649 }
650 }
651
652 if (release) {
653 if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
654 netif_wake_queue(dev);
655 }
656}
657
658static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
659{
660 struct tsi108_prv_data *data = netdev_priv(dev);
661 int frags = skb_shinfo(skb)->nr_frags + 1;
662 int i;
663
664 if (!data->phy_ok && net_ratelimit())
665 printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
666
667 if (!data->link_up) {
668 printk(KERN_ERR "%s: Transmit while link is down!\n",
669 dev->name);
670 netif_stop_queue(dev);
671 return NETDEV_TX_BUSY;
672 }
673
674 if (data->txfree < MAX_SKB_FRAGS + 1) {
675 netif_stop_queue(dev);
676
677 if (net_ratelimit())
678 printk(KERN_ERR "%s: Transmit with full tx ring!\n",
679 dev->name);
680 return NETDEV_TX_BUSY;
681 }
682
683 if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
684 netif_stop_queue(dev);
685 }
686
687 spin_lock_irq(&data->txlock);
688
689 for (i = 0; i < frags; i++) {
690 int misc = 0;
691 int tx = data->txhead;
692
693 /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
694 * the interrupt bit. TX descriptor-complete interrupts are
695 * enabled when the queue fills up, and masked when there is
696 * still free space. This way, when saturating the outbound
697 * link, the tx interrupts are kept to a reasonable level.
698 * When the queue is not full, reclamation of skbs still occurs
699 * as new packets are transmitted, or on a queue-empty
700 * interrupt.
701 */
702
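		/* Worked numbers: with TSI108_TX_INT_FREQ == 64 and
		 * TSI108_TXRING_LEN == 256, at most four descriptors per
		 * ring (indexes 0, 64, 128, 192) request an interrupt,
		 * and only while at least 64 descriptors are outstanding. */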
703 if ((tx % TSI108_TX_INT_FREQ == 0) &&
704 ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
705 misc = TSI108_TX_INT;
706
707 data->txskbs[tx] = skb;
708
709 if (i == 0) {
710 data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
711 skb->len - skb->data_len, DMA_TO_DEVICE);
712 data->txring[tx].len = skb->len - skb->data_len;
713 misc |= TSI108_TX_SOF;
714 } else {
715 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
716
717 data->txring[tx].buf0 =
718 dma_map_page(NULL, frag->page, frag->page_offset,
719 frag->size, DMA_TO_DEVICE);
720 data->txring[tx].len = frag->size;
721 }
722
723 if (i == frags - 1)
724 misc |= TSI108_TX_EOF;
725
726 if (netif_msg_pktdata(data)) {
727 int i;
728 printk("%s: Tx Frame contents (%d)\n", dev->name,
729 skb->len);
730 for (i = 0; i < skb->len; i++)
731 printk(" %2.2x", skb->data[i]);
732 printk(".\n");
733 }
734 data->txring[tx].misc = misc | TSI108_TX_OWN;
735
736 data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
737 data->txfree--;
738 }
739
740 tsi108_complete_tx(dev);
741
742 /* This must be done after the check for completed tx descriptors,
743 * so that the tail pointer is correct.
744 */
745
746 if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
747 tsi108_restart_tx(data);
748
749 spin_unlock_irq(&data->txlock);
750 return NETDEV_TX_OK;
751}
752
753static int tsi108_complete_rx(struct net_device *dev, int budget)
754{
755 struct tsi108_prv_data *data = netdev_priv(dev);
756 int done = 0;
757
758 while (data->rxfree && done != budget) {
759 int rx = data->rxtail;
760 struct sk_buff *skb;
761
762 if (data->rxring[rx].misc & TSI108_RX_OWN)
763 break;
764
765 skb = data->rxskbs[rx];
766 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
767 data->rxfree--;
768 done++;
769
770 if (data->rxring[rx].misc & TSI108_RX_BAD) {
771 spin_lock_irq(&data->misclock);
772
773 if (data->rxring[rx].misc & TSI108_RX_CRC)
774 data->stats.rx_crc_errors++;
775 if (data->rxring[rx].misc & TSI108_RX_OVER)
776 data->stats.rx_fifo_errors++;
777
778 spin_unlock_irq(&data->misclock);
779
780 dev_kfree_skb_any(skb);
781 continue;
782 }
783 if (netif_msg_pktdata(data)) {
784 int i;
785 printk("%s: Rx Frame contents (%d)\n",
786 dev->name, data->rxring[rx].len);
787 for (i = 0; i < data->rxring[rx].len; i++)
788 printk(" %2.2x", skb->data[i]);
789 printk(".\n");
790 }
791
792 skb->dev = dev;
793 skb_put(skb, data->rxring[rx].len);
794 skb->protocol = eth_type_trans(skb, dev);
795 netif_receive_skb(skb);
796 dev->last_rx = jiffies;
797 }
798
799 return done;
800}
801
802static int tsi108_refill_rx(struct net_device *dev, int budget)
803{
804 struct tsi108_prv_data *data = netdev_priv(dev);
805 int done = 0;
806
807 while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
808 int rx = data->rxhead;
809 struct sk_buff *skb;
810
811 data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
812 if (!skb)
813 break;
814
815 skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
816
817 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
818 TSI108_RX_SKB_SIZE,
819 DMA_FROM_DEVICE);
820
821 /* Sometimes the hardware sets blen to zero after packet
822 * reception, even though the manual says that it's only ever
823 * modified by the driver.
824 */
825
826 data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
827 data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
828
829 data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
830 data->rxfree++;
831 done++;
832 }
833
834 if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
835 TSI108_EC_RXSTAT_QUEUE0))
836 tsi108_restart_rx(data, dev);
837
838 return done;
839}
840
841static int tsi108_poll(struct net_device *dev, int *budget)
842{
843 struct tsi108_prv_data *data = netdev_priv(dev);
844 u32 estat = TSI_READ(TSI108_EC_RXESTAT);
845 u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
846 int total_budget = min(*budget, dev->quota);
847 int num_received = 0, num_filled = 0, budget_used;
848
849 intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
850 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
851
852 TSI_WRITE(TSI108_EC_RXESTAT, estat);
853 TSI_WRITE(TSI108_EC_INTSTAT, intstat);
854
855 if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
856 num_received = tsi108_complete_rx(dev, total_budget);
857
858 /* This should normally fill no more slots than the number of
859 * packets received in tsi108_complete_rx(). The exception
860 * is when we previously ran out of memory for RX SKBs. In that
861 * case, it's helpful to obey the budget, not only so that the
862 * CPU isn't hogged, but so that memory (which may still be low)
863 * is not hogged by one device.
864 *
865 * A work unit is considered to be two SKBs to allow us to catch
866 * up when the ring has shrunk due to out-of-memory but we're
867 * still removing the full budget's worth of packets each time.
868 */
869
870 if (data->rxfree < TSI108_RXRING_LEN)
871 num_filled = tsi108_refill_rx(dev, total_budget * 2);
872
873 if (intstat & TSI108_INT_RXERROR) {
874 u32 err = TSI_READ(TSI108_EC_RXERR);
875 TSI_WRITE(TSI108_EC_RXERR, err);
876
877 if (err) {
878 if (net_ratelimit())
879 printk(KERN_DEBUG "%s: RX error %x\n",
880 dev->name, err);
881
882 if (!(TSI_READ(TSI108_EC_RXSTAT) &
883 TSI108_EC_RXSTAT_QUEUE0))
884 tsi108_restart_rx(data, dev);
885 }
886 }
887
888 if (intstat & TSI108_INT_RXOVERRUN) {
889 spin_lock_irq(&data->misclock);
890 data->stats.rx_fifo_errors++;
891 spin_unlock_irq(&data->misclock);
892 }
893
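	/* Example of the two-SKBs-per-work-unit accounting: with a total
	 * budget of 64, up to 128 slots may be refilled; if 10 packets
	 * were received but 30 slots refilled (catching up after an
	 * earlier allocation failure), budget_used = max(10, 30 / 2) = 15. */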
894 budget_used = max(num_received, num_filled / 2);
895
896 *budget -= budget_used;
897 dev->quota -= budget_used;
898
899 if (budget_used != total_budget) {
900 data->rxpending = 0;
901 netif_rx_complete(dev);
902
903 TSI_WRITE(TSI108_EC_INTMASK,
904 TSI_READ(TSI108_EC_INTMASK)
905 & ~(TSI108_INT_RXQUEUE0
906 | TSI108_INT_RXTHRESH |
907 TSI108_INT_RXOVERRUN |
908 TSI108_INT_RXERROR |
909 TSI108_INT_RXWAIT));
910
911 /* IRQs are level-triggered, so no need to re-check */
912 return 0;
913 } else {
914 data->rxpending = 1;
915 }
916
917 return 1;
918}
919
920static void tsi108_rx_int(struct net_device *dev)
921{
922 struct tsi108_prv_data *data = netdev_priv(dev);
923
924 /* A race could cause dev to already be scheduled, so it's not an
925 * error if that happens (and interrupts shouldn't be re-masked,
926 * because that can cause harmful races, if poll has already
927 * unmasked them but not cleared LINK_STATE_SCHED).
928 *
929 * This can happen if this code races with tsi108_poll(), which masks
930 * the interrupts after tsi108_irq_one() read the mask, but before
931 * netif_rx_schedule is called. It could also happen due to calls
932 * from tsi108_check_rxring().
933 */
934
935 if (netif_rx_schedule_prep(dev)) {
936 /* Mask, rather than ack, the receive interrupts. The ack
937 * will happen in tsi108_poll().
938 */
939
940 TSI_WRITE(TSI108_EC_INTMASK,
941 TSI_READ(TSI108_EC_INTMASK) |
942 TSI108_INT_RXQUEUE0
943 | TSI108_INT_RXTHRESH |
944 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
945 TSI108_INT_RXWAIT);
946 __netif_rx_schedule(dev);
947 } else {
948 if (!netif_running(dev)) {
949 /* This can happen if an interrupt occurs while the
950 * interface is being brought down, as the START
951 * bit is cleared before the stop function is called.
952 *
953 * In this case, the interrupts must be masked, or
954 * they will continue indefinitely.
955 *
956 * There's a race here if the interface is brought down
957 * and then up in rapid succession, as the device could
958 * be made running after the above check and before
959 * the masking below. This will only happen if the IRQ
960			 * thread has a lower priority than the task bringing
961 * up the interface. Fixing this race would likely
962 * require changes in generic code.
963 */
964
965 TSI_WRITE(TSI108_EC_INTMASK,
966 TSI_READ
967 (TSI108_EC_INTMASK) |
968 TSI108_INT_RXQUEUE0 |
969 TSI108_INT_RXTHRESH |
970 TSI108_INT_RXOVERRUN |
971 TSI108_INT_RXERROR |
972 TSI108_INT_RXWAIT);
973 }
974 }
975}
976
977/* If the RX ring has run out of memory, try periodically
978 * to allocate some more, as otherwise poll would never
979 * get called (apart from the initial end-of-queue condition).
980 *
981 * This is called once per second (by default) from the thread.
982 */
983
984static void tsi108_check_rxring(struct net_device *dev)
985{
986 struct tsi108_prv_data *data = netdev_priv(dev);
987
988	/* A poll is scheduled, as opposed to calling tsi108_refill_rx
989 * directly, so as to keep the receive path single-threaded
990 * (and thus not needing a lock).
991 */
992
993 if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
994 tsi108_rx_int(dev);
995}
996
997static void tsi108_tx_int(struct net_device *dev)
998{
999 struct tsi108_prv_data *data = netdev_priv(dev);
1000 u32 estat = TSI_READ(TSI108_EC_TXESTAT);
1001
1002 TSI_WRITE(TSI108_EC_TXESTAT, estat);
1003 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
1004 TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
1005 if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
1006 u32 err = TSI_READ(TSI108_EC_TXERR);
1007 TSI_WRITE(TSI108_EC_TXERR, err);
1008
1009 if (err && net_ratelimit())
1010 printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
1011 }
1012
1013 if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
1014 spin_lock(&data->txlock);
1015 tsi108_complete_tx(dev);
1016 spin_unlock(&data->txlock);
1017 }
1018}
1019
1020
1021static irqreturn_t tsi108_irq(int irq, void *dev_id)
1022{
1023 struct net_device *dev = dev_id;
1024 struct tsi108_prv_data *data = netdev_priv(dev);
1025 u32 stat = TSI_READ(TSI108_EC_INTSTAT);
1026
1027 if (!(stat & TSI108_INT_ANY))
1028 return IRQ_NONE; /* Not our interrupt */
1029
1030 stat &= ~TSI_READ(TSI108_EC_INTMASK);
1031
1032 if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
1033 TSI108_INT_TXERROR))
1034 tsi108_tx_int(dev);
1035 if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
1036 TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
1037 TSI108_INT_RXERROR))
1038 tsi108_rx_int(dev);
1039
1040 if (stat & TSI108_INT_SFN) {
1041 if (net_ratelimit())
1042 printk(KERN_DEBUG "%s: SFN error\n", dev->name);
1043 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
1044 }
1045
1046 if (stat & TSI108_INT_STATCARRY) {
1047 tsi108_stat_carry(dev);
1048 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
1049 }
1050
1051 return IRQ_HANDLED;
1052}
1053
1054static void tsi108_stop_ethernet(struct net_device *dev)
1055{
1056 struct tsi108_prv_data *data = netdev_priv(dev);
1057 int i = 1000;
1058 /* Disable all TX and RX queues ... */
1059 TSI_WRITE(TSI108_EC_TXCTRL, 0);
1060 TSI_WRITE(TSI108_EC_RXCTRL, 0);
1061
1062 /* ...and wait for them to become idle */
1063 while(i--) {
1064 if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
1065 break;
1066 udelay(10);
1067 }
1068 i = 1000;
1069 while(i--){
1070 if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
1071 return;
1072 udelay(10);
1073 }
1074	printk(KERN_ERR "%s: function timed out\n", __FUNCTION__);
1075}
1076
1077static void tsi108_reset_ether(struct tsi108_prv_data * data)
1078{
1079 TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
1080 udelay(100);
1081 TSI_WRITE(TSI108_MAC_CFG1, 0);
1082
1083 TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
1084 udelay(100);
1085 TSI_WRITE(TSI108_EC_PORTCTRL,
1086 TSI_READ(TSI108_EC_PORTCTRL) &
1087 ~TSI108_EC_PORTCTRL_STATRST);
1088
1089 TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
1090 udelay(100);
1091 TSI_WRITE(TSI108_EC_TXCFG,
1092 TSI_READ(TSI108_EC_TXCFG) &
1093 ~TSI108_EC_TXCFG_RST);
1094
1095 TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
1096 udelay(100);
1097 TSI_WRITE(TSI108_EC_RXCFG,
1098 TSI_READ(TSI108_EC_RXCFG) &
1099 ~TSI108_EC_RXCFG_RST);
1100
1101 TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
1102 TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
1103 TSI108_MAC_MII_MGMT_RST);
1104 udelay(100);
1105 TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
1106 (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
1107 ~(TSI108_MAC_MII_MGMT_RST |
1108 TSI108_MAC_MII_MGMT_CLK)) | 0x07);
1109}
1110
1111static int tsi108_get_mac(struct net_device *dev)
1112{
1113 struct tsi108_prv_data *data = netdev_priv(dev);
1114 u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
1115 u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
1116
1117 /* Note that the octets are reversed from what the manual says,
1118 * producing an even weirder ordering...
1119 */
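	/* E.g. the default address 00:06:d2:00:00:01 assigned below packs
	 * as word2 == 0x06000000 and word1 == 0x010000d2. */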
1120 if (word2 == 0 && word1 == 0) {
1121 dev->dev_addr[0] = 0x00;
1122 dev->dev_addr[1] = 0x06;
1123 dev->dev_addr[2] = 0xd2;
1124 dev->dev_addr[3] = 0x00;
1125 dev->dev_addr[4] = 0x00;
1126 if (0x8 == data->phy)
1127 dev->dev_addr[5] = 0x01;
1128 else
1129 dev->dev_addr[5] = 0x02;
1130
1131 word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
1132
1133 word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
1134 (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
1135
1136 TSI_WRITE(TSI108_MAC_ADDR1, word1);
1137 TSI_WRITE(TSI108_MAC_ADDR2, word2);
1138 } else {
1139 dev->dev_addr[0] = (word2 >> 16) & 0xff;
1140 dev->dev_addr[1] = (word2 >> 24) & 0xff;
1141 dev->dev_addr[2] = (word1 >> 0) & 0xff;
1142 dev->dev_addr[3] = (word1 >> 8) & 0xff;
1143 dev->dev_addr[4] = (word1 >> 16) & 0xff;
1144 dev->dev_addr[5] = (word1 >> 24) & 0xff;
1145 }
1146
1147 if (!is_valid_ether_addr(dev->dev_addr)) {
1148		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
1149 return -EINVAL;
1150 }
1151
1152 return 0;
1153}
1154
1155static int tsi108_set_mac(struct net_device *dev, void *addr)
1156{
1157 struct tsi108_prv_data *data = netdev_priv(dev);
1158 u32 word1, word2;
1159 int i;
1160
1161 if (!is_valid_ether_addr(addr))
1162 return -EINVAL;
1163
1164 for (i = 0; i < 6; i++)
1165 /* +2 is for the offset of the HW addr type */
1166 dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
1167
1168 word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
1169
1170 word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
1171 (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
1172
1173 spin_lock_irq(&data->misclock);
1174 TSI_WRITE(TSI108_MAC_ADDR1, word1);
1175 TSI_WRITE(TSI108_MAC_ADDR2, word2);
1176 spin_lock(&data->txlock);
1177
1178 if (data->txfree && data->link_up)
1179 netif_wake_queue(dev);
1180
1181 spin_unlock(&data->txlock);
1182 spin_unlock_irq(&data->misclock);
1183 return 0;
1184}
1185
1186/* Protected by dev->xmit_lock. */
1187static void tsi108_set_rx_mode(struct net_device *dev)
1188{
1189 struct tsi108_prv_data *data = netdev_priv(dev);
1190 u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
1191
1192 if (dev->flags & IFF_PROMISC) {
1193 rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
1194 rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
1195 goto out;
1196 }
1197
1198 rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
1199
1200 if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
1201 int i;
1202 struct dev_mc_list *mc = dev->mc_list;
1203 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
1204
1205 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1206
1207 while (mc) {
1208 u32 hash, crc;
1209
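			/* The top nine bits of the CRC select one of the
			 * 512 filter bits (mc_hash[] is 16 32-bit words on
			 * this target); __set_bit() then picks word
			 * hash / BITS_PER_LONG, bit hash % BITS_PER_LONG. */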
1210 if (mc->dmi_addrlen == 6) {
1211 crc = ether_crc(6, mc->dmi_addr);
1212 hash = crc >> 23;
1213
1214 __set_bit(hash, &data->mc_hash[0]);
1215 } else {
1216 printk(KERN_ERR
1217 "%s: got multicast address of length %d "
1218 "instead of 6.\n", dev->name,
1219 mc->dmi_addrlen);
1220 }
1221
1222 mc = mc->next;
1223 }
1224
1225 TSI_WRITE(TSI108_EC_HASHADDR,
1226 TSI108_EC_HASHADDR_AUTOINC |
1227 TSI108_EC_HASHADDR_MCAST);
1228
1229 for (i = 0; i < 16; i++) {
1230 /* The manual says that the hardware may drop
1231 * back-to-back writes to the data register.
1232 */
1233 udelay(1);
1234 TSI_WRITE(TSI108_EC_HASHDATA,
1235 data->mc_hash[i]);
1236 }
1237 }
1238
1239 out:
1240 TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
1241}
1242
1243static void tsi108_init_phy(struct net_device *dev)
1244{
1245 struct tsi108_prv_data *data = netdev_priv(dev);
1246	u32 i = 100;
1247	u16 phyval = 0;
1248	unsigned long flags;
1249
1250	spin_lock_irqsave(&phy_lock, flags);
1251
1252	/* Reset the PHY and wait (up to 1 ms) for the reset bit to clear. */
1253	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
1254	while (i-- && (tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
1255		udelay(10);
1256	if (tsi108_read_mii(data, MII_BMCR) & BMCR_RESET)
1257		printk(KERN_ERR "%s: PHY reset timed out\n", __FUNCTION__);
1258
1259	i = 0;	/* reused below to bound the MII_READ_DELAY link wait */
1260
1261#if (TSI108_PHY_TYPE == PHY_BCM54XX) /* Broadcom BCM54xx PHY */
1262 tsi108_write_mii(data, 0x09, 0x0300);
1263 tsi108_write_mii(data, 0x10, 0x1020);
1264 tsi108_write_mii(data, 0x1c, 0x8c00);
1265#endif
1266
1267 tsi108_write_mii(data,
1268 MII_BMCR,
1269 BMCR_ANENABLE | BMCR_ANRESTART);
1270 while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
1271 cpu_relax();
1272
1273 /* Set G/MII mode and receive clock select in TBI control #2. The
1274 * second port won't work if this isn't done, even though we don't
1275 * use TBI mode.
1276 */
1277
1278 tsi108_write_tbi(data, 0x11, 0x30);
1279
1280 /* FIXME: It seems to take more than 2 back-to-back reads to the
1281 * PHY_STAT register before the link up status bit is set.
1282 */
1283
1284 data->link_up = 1;
1285
1286 while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
1287 BMSR_LSTATUS)) {
1288 if (i++ > (MII_READ_DELAY / 10)) {
1289 data->link_up = 0;
1290 break;
1291 }
1292 spin_unlock_irqrestore(&phy_lock, flags);
1293 msleep(10);
1294 spin_lock_irqsave(&phy_lock, flags);
1295 }
1296
1297 printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
1298 data->phy_ok = 1;
1299 data->init_media = 1;
1300 spin_unlock_irqrestore(&phy_lock, flags);
1301}
1302
1303static void tsi108_kill_phy(struct net_device *dev)
1304{
1305 struct tsi108_prv_data *data = netdev_priv(dev);
1306 unsigned long flags;
1307
1308 spin_lock_irqsave(&phy_lock, flags);
1309 tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
1310 data->phy_ok = 0;
1311 spin_unlock_irqrestore(&phy_lock, flags);
1312}
1313
1314static int tsi108_open(struct net_device *dev)
1315{
1316 int i;
1317 struct tsi108_prv_data *data = netdev_priv(dev);
1318 unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
1319 unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
1320
1321 i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
1322 if (i != 0) {
1323 printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
1324 data->id, data->irq_num);
1325 return i;
1326 } else {
1327 dev->irq = data->irq_num;
1328 printk(KERN_NOTICE
1329 "tsi108_open : Port %d Assigned IRQ %d to %s\n",
1330 data->id, dev->irq, dev->name);
1331 }
1332
1333 data->rxring = dma_alloc_coherent(NULL, rxring_size,
1334 &data->rxdma, GFP_KERNEL);
1335
1336 if (!data->rxring) {
1337 printk(KERN_DEBUG
1338 "TSI108_ETH: failed to allocate memory for rxring!\n");
1339 return -ENOMEM;
1340 } else {
1341 memset(data->rxring, 0, rxring_size);
1342 }
1343
1344 data->txring = dma_alloc_coherent(NULL, txring_size,
1345 &data->txdma, GFP_KERNEL);
1346
1347 if (!data->txring) {
1348 printk(KERN_DEBUG
1349 "TSI108_ETH: failed to allocate memory for txring!\n");
1350		dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
1351 return -ENOMEM;
1352 } else {
1353 memset(data->txring, 0, txring_size);
1354 }
1355
1356 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1357 data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
1358 data->rxring[i].blen = TSI108_RXBUF_SIZE;
1359 data->rxring[i].vlan = 0;
1360 }
1361
1362 data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
1363
1364 data->rxtail = 0;
1365 data->rxhead = 0;
1366
1367 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1368 struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
1369
1370 if (!skb) {
1371 /* Bah. No memory for now, but maybe we'll get
1372 * some more later.
1373 * For now, we'll live with the smaller ring.
1374 */
1375 printk(KERN_WARNING
1376 "%s: Could only allocate %d receive skb(s).\n",
1377 dev->name, i);
1378 data->rxhead = i;
1379 break;
1380 }
1381
1382		/* Align the payload on a 4-byte boundary */
1383		skb_reserve(skb, 2);
1384
1385		data->rxskbs[i] = skb;
1386 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1387 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1388 }
1389
1390 data->rxfree = i;
1391 TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
1392
1393 for (i = 0; i < TSI108_TXRING_LEN; i++) {
1394 data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
1395 data->txring[i].misc = 0;
1396 }
1397
1398 data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
1399 data->txtail = 0;
1400 data->txhead = 0;
1401 data->txfree = TSI108_TXRING_LEN;
1402 TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
1403 tsi108_init_phy(dev);
1404
1405 setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
1406 mod_timer(&data->timer, jiffies + 1);
1407
1408 tsi108_restart_rx(data, dev);
1409
1410 TSI_WRITE(TSI108_EC_INTSTAT, ~0);
1411
1412 TSI_WRITE(TSI108_EC_INTMASK,
1413 ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
1414 TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
1415 TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
1416 TSI108_INT_SFN | TSI108_INT_STATCARRY));
1417
1418 TSI_WRITE(TSI108_MAC_CFG1,
1419 TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
1420 netif_start_queue(dev);
1421 return 0;
1422}
1423
1424static int tsi108_close(struct net_device *dev)
1425{
1426 struct tsi108_prv_data *data = netdev_priv(dev);
1427
1428 netif_stop_queue(dev);
1429
1430 del_timer_sync(&data->timer);
1431
1432 tsi108_stop_ethernet(dev);
1433 tsi108_kill_phy(dev);
1434 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1435 TSI_WRITE(TSI108_MAC_CFG1, 0);
1436
1437 /* Check for any pending TX packets, and drop them. */
1438
1439 while (!data->txfree || data->txhead != data->txtail) {
1440 int tx = data->txtail;
1441 struct sk_buff *skb;
1442 skb = data->txskbs[tx];
1443 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
1444 data->txfree++;
1445 dev_kfree_skb(skb);
1446 }
1447
1448 synchronize_irq(data->irq_num);
1449 free_irq(data->irq_num, dev);
1450
1451 /* Discard the RX ring. */
1452
1453 while (data->rxfree) {
1454 int rx = data->rxtail;
1455 struct sk_buff *skb;
1456
1457 skb = data->rxskbs[rx];
1458 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
1459 data->rxfree--;
1460 dev_kfree_skb(skb);
1461 }
1462
1463	dma_free_coherent(NULL,
1464			  TSI108_RXRING_LEN * sizeof(rx_desc),
1465			  data->rxring, data->rxdma);
1466	dma_free_coherent(NULL,
1467			  TSI108_TXRING_LEN * sizeof(tx_desc),
1468			  data->txring, data->txdma);
1469
1470 return 0;
1471}
1472
1473static void tsi108_init_mac(struct net_device *dev)
1474{
1475 struct tsi108_prv_data *data = netdev_priv(dev);
1476
1477 TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
1478 TSI108_MAC_CFG2_PADCRC);
1479
1480 TSI_WRITE(TSI108_EC_TXTHRESH,
1481 (192 << TSI108_EC_TXTHRESH_STARTFILL) |
1482 (192 << TSI108_EC_TXTHRESH_STOPFILL));
1483
1484 TSI_WRITE(TSI108_STAT_CARRYMASK1,
1485 ~(TSI108_STAT_CARRY1_RXBYTES |
1486 TSI108_STAT_CARRY1_RXPKTS |
1487 TSI108_STAT_CARRY1_RXFCS |
1488 TSI108_STAT_CARRY1_RXMCAST |
1489 TSI108_STAT_CARRY1_RXALIGN |
1490 TSI108_STAT_CARRY1_RXLENGTH |
1491 TSI108_STAT_CARRY1_RXRUNT |
1492 TSI108_STAT_CARRY1_RXJUMBO |
1493 TSI108_STAT_CARRY1_RXFRAG |
1494 TSI108_STAT_CARRY1_RXJABBER |
1495 TSI108_STAT_CARRY1_RXDROP));
1496
1497 TSI_WRITE(TSI108_STAT_CARRYMASK2,
1498 ~(TSI108_STAT_CARRY2_TXBYTES |
1499 TSI108_STAT_CARRY2_TXPKTS |
1500 TSI108_STAT_CARRY2_TXEXDEF |
1501 TSI108_STAT_CARRY2_TXEXCOL |
1502 TSI108_STAT_CARRY2_TXTCOL |
1503 TSI108_STAT_CARRY2_TXPAUSE));
1504
1505 TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
1506 TSI_WRITE(TSI108_MAC_CFG1, 0);
1507
1508 TSI_WRITE(TSI108_EC_RXCFG,
1509 TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
1510
1511 TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
1512 TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
1513 TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
1514 TSI108_EC_TXQ_CFG_SFNPORT));
1515
1516 TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
1517 TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
1518 TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
1519 TSI108_EC_RXQ_CFG_SFNPORT));
1520
1521 TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
1522 TSI108_EC_TXQ_BUFCFG_BURST256 |
1523 TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
1524 TSI108_EC_TXQ_BUFCFG_SFNPORT));
1525
1526 TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
1527 TSI108_EC_RXQ_BUFCFG_BURST256 |
1528 TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
1529 TSI108_EC_RXQ_BUFCFG_SFNPORT));
1530
1531 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1532}
1533
1534static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1535{
1536 struct tsi108_prv_data *data = netdev_priv(dev);
1537 return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
1538}
1539
1540static int
1541tsi108_init_one(struct platform_device *pdev)
1542{
1543 struct net_device *dev = NULL;
1544 struct tsi108_prv_data *data = NULL;
1545 hw_info *einfo;
1546 int err = 0;
1547
1548 einfo = pdev->dev.platform_data;
1549
1550 if (NULL == einfo) {
1551 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
1552 pdev->id);
1553 return -ENODEV;
1554 }
1555
1556 /* Create an ethernet device instance */
1557
1558 dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
1559 if (!dev) {
1560 printk("tsi108_eth: Could not allocate a device structure\n");
1561 return -ENOMEM;
1562 }
1563
1564 printk("tsi108_eth%d: probe...\n", pdev->id);
1565 data = netdev_priv(dev);
1566
1567	pr_debug("tsi108_eth%d:regs:phyregs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
1568 pdev->id, einfo->regs, einfo->phyregs,
1569 einfo->phy, einfo->irq_num);
1570
1571 data->regs = ioremap(einfo->regs, 0x400);
1572 if (NULL == data->regs) {
1573 err = -ENOMEM;
1574 goto regs_fail;
1575 }
1576
1577 data->phyregs = ioremap(einfo->phyregs, 0x400);
1578 if (NULL == data->phyregs) {
1579 err = -ENOMEM;
1580 goto regs_fail;
1581 }
1582/* MII setup */
1583 data->mii_if.dev = dev;
1584 data->mii_if.mdio_read = tsi108_mdio_read;
1585 data->mii_if.mdio_write = tsi108_mdio_write;
1586 data->mii_if.phy_id = einfo->phy;
1587 data->mii_if.phy_id_mask = 0x1f;
1588 data->mii_if.reg_num_mask = 0x1f;
1589 data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1590
1591 data->phy = einfo->phy;
1592 data->irq_num = einfo->irq_num;
1593 data->id = pdev->id;
1594 dev->open = tsi108_open;
1595 dev->stop = tsi108_close;
1596 dev->hard_start_xmit = tsi108_send_packet;
1597 dev->set_mac_address = tsi108_set_mac;
1598 dev->set_multicast_list = tsi108_set_rx_mode;
1599 dev->get_stats = tsi108_get_stats;
1600 dev->poll = tsi108_poll;
1601 dev->do_ioctl = tsi108_do_ioctl;
1602 dev->weight = 64; /* 64 is more suitable for GigE interface - klai */
1603
1604 /* Apparently, the Linux networking code won't use scatter-gather
1605 * if the hardware doesn't do checksums. However, it's faster
1606 * to checksum in place and use SG, as (among other reasons)
1607 * the cache won't be dirtied (which then has to be flushed
1608 * before DMA). The checksumming is done by the driver (via
1609 * a new function skb_csum_dev() in net/core/skbuff.c).
1610 */
1611
1612 dev->features = NETIF_F_HIGHDMA;
1613 SET_MODULE_OWNER(dev);
1614
1615 spin_lock_init(&data->txlock);
1616 spin_lock_init(&data->misclock);
1617
1618 tsi108_reset_ether(data);
1619 tsi108_kill_phy(dev);
1620
1621 if ((err = tsi108_get_mac(dev)) != 0) {
1622 printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n",
1623 dev->name);
1624 goto register_fail;
1625 }
1626
1627 tsi108_init_mac(dev);
1628 err = register_netdev(dev);
1629 if (err) {
1630 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
1631 dev->name);
1632 goto register_fail;
1633 }
1634
1635 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
1636 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
1637 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1638 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1639#ifdef DEBUG
1640 data->msg_enable = DEBUG;
1641 dump_eth_one(dev);
1642#endif
1643
1644 return 0;
1645
1646register_fail:
1647 iounmap(data->regs);
1648 iounmap(data->phyregs);
1649
1650regs_fail:
1651 free_netdev(dev);
1652 return err;
1653}
1654
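/* Board-side sketch, for illustration only (not part of this driver):
 * the probe above binds against the name "tsi-ethernet" and reads a
 * hw_info structure from platform_data.  Field names mirror the
 * einfo->regs/phyregs/phy/irq_num accesses above; every value below is
 * made up, and the real device registration lives in the MPC7448HPC2
 * platform code, not here.
 *
 *	static hw_info tsi_eth0_data = {
 *		.regs    = 0xc0006000,		// hypothetical CSR base
 *		.phyregs = 0xc0006400,		// hypothetical PHY register bank
 *		.phy     = 8,			// PHY address on the MII bus
 *		.irq_num = 16,			// hypothetical interrupt line
 *	};
 *
 *	static struct platform_device tsi_eth0_device = {
 *		.name = "tsi-ethernet",
 *		.id   = 0,
 *		.dev  = { .platform_data = &tsi_eth0_data },
 *	};
 *
 *	platform_device_register(&tsi_eth0_device);
 */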
1655/* There's no way to either get interrupts from the PHY when
1656 * something changes, or to have the Tsi108 automatically communicate
1657 * with the PHY to reconfigure itself.
1658 *
1659 * Thus, we have to do it using a timer.
1660 */
1661
1662static void tsi108_timed_checker(unsigned long dev_ptr)
1663{
1664 struct net_device *dev = (struct net_device *)dev_ptr;
1665 struct tsi108_prv_data *data = netdev_priv(dev);
1666
1667 tsi108_check_phy(dev);
1668 tsi108_check_rxring(dev);
1669 mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
1670}
1671
1672static int tsi108_ether_init(void)
1673{
1674 int ret;
1675 ret = platform_driver_register (&tsi_eth_driver);
1676 if (ret < 0){
1677 printk("tsi108_ether_init: error initializing ethernet "
1678 "device\n");
1679 return ret;
1680 }
1681 return 0;
1682}
1683
1684static int tsi108_ether_remove(struct platform_device *pdev)
1685{
1686 struct net_device *dev = platform_get_drvdata(pdev);
1687 struct tsi108_prv_data *priv = netdev_priv(dev);
1688
1689 unregister_netdev(dev);
1690 tsi108_stop_ethernet(dev);
1691 platform_set_drvdata(pdev, NULL);
1692 iounmap(priv->regs);
1693 iounmap(priv->phyregs);
1694 free_netdev(dev);
1695
1696 return 0;
1697}
1698static void tsi108_ether_exit(void)
1699{
1700 platform_driver_unregister(&tsi_eth_driver);
1701}
1702
1703module_init(tsi108_ether_init);
1704module_exit(tsi108_ether_exit);
1705
1706MODULE_AUTHOR("Tundra Semiconductor Corporation");
1707MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
1708MODULE_LICENSE("GPL");