author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2011-07-30 04:15:34 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2011-08-12 06:41:04 -0400
commit     e75ed60cbaf6a2b5f14f00d96d926110f983be6b (patch)
tree       fcc6f765bfb782c34381b2801eba3c4d38077c99 /drivers/net/ethernet/tundra
parent     679ec0ef08afde98fd5b2d1aa9fb3e50cce657a0 (diff)
tsi108*: Move the Tundra driver
Move the Tundra driver to drivers/net/ethernet/tundra/ and make the
necessary Kconfig and Makefile changes.

CC: Kong Lai <kong.lai@tundra.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
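The diffstat below is limited to the new directory, so the parent-level hookup is not visible in this view. For a vendor-directory move like this one, the wiring added to drivers/net/ethernet/Kconfig and drivers/net/ethernet/Makefile typically looks like the following sketch (an illustration of the usual pattern, not lines taken from this diff):

	# drivers/net/ethernet/Kconfig (sketch)
	source "drivers/net/ethernet/tundra/Kconfig"

	# drivers/net/ethernet/Makefile (sketch)
	obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/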
Diffstat (limited to 'drivers/net/ethernet/tundra')
-rw-r--r--  drivers/net/ethernet/tundra/Kconfig          28
-rw-r--r--  drivers/net/ethernet/tundra/Makefile          5
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c   1727
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.h    356
4 files changed, 2116 insertions, 0 deletions
diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig
new file mode 100644
index 000000000000..03925d1aecb2
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Kconfig
@@ -0,0 +1,28 @@
1#
2# Tundra network device configuration
3#
4
5config NET_VENDOR_TUNDRA
6 bool "Tundra devices"
7 depends on TSI108_BRIDGE
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about Tundra cards. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_TUNDRA
19
20config TSI108_ETH
21 tristate "Tundra TSI108 gigabit Ethernet support"
22 depends on TSI108_BRIDGE
23 ---help---
24 This driver supports Tundra TSI108 gigabit Ethernet ports.
25 To compile this driver as a module, choose M here: the module
26 will be called tsi108_eth.
27
28endif # NET_VENDOR_TUNDRA
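As the help text above explains, NET_VENDOR_TUNDRA only gates the prompts, while TSI108_ETH selects the actual port driver, either built in or built as the tsi108_eth module. A minimal .config fragment (a sketch, assuming the platform already enables TSI108_BRIDGE) would be:

	CONFIG_NET_VENDOR_TUNDRA=y
	CONFIG_TSI108_ETH=m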
diff --git a/drivers/net/ethernet/tundra/Makefile b/drivers/net/ethernet/tundra/Makefile
new file mode 100644
index 000000000000..439f6930235b
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Tundra network device drivers.
3#
4
5obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
new file mode 100644
index 000000000000..64cb9ac19ed9
--- /dev/null
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -0,0 +1,1727 @@
1/*******************************************************************************
2
3 Copyright(c) 2006 Tundra Semiconductor Corporation.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of the GNU General Public License as published by the Free
7 Software Foundation; either version 2 of the License, or (at your option)
8 any later version.
9
10 This program is distributed in the hope that it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 59
17 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18
19*******************************************************************************/
20
21/* This driver is based on the driver code originally developed
22 * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
23 * scott.wood@timesys.com * Copyright (C) 2003 TimeSys Corporation
24 *
25 * Currently changes from original version are:
26 * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
27 * - modifications to handle two ports independently and support for
28 * additional PHY devices (alexandre.bounine@tundra.com)
29 * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
30 *
31 */
32
33#include <linux/module.h>
34#include <linux/types.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <linux/net.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/skbuff.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44#include <linux/crc32.h>
45#include <linux/mii.h>
46#include <linux/device.h>
47#include <linux/pci.h>
48#include <linux/rtnetlink.h>
49#include <linux/timer.h>
50#include <linux/platform_device.h>
51#include <linux/gfp.h>
52
53#include <asm/system.h>
54#include <asm/io.h>
55#include <asm/tsi108.h>
56
57#include "tsi108_eth.h"
58
59#define MII_READ_DELAY 10000 /* max link wait time in msec */
60
61#define TSI108_RXRING_LEN 256
62
63/* NOTE: The driver currently does not support receiving packets
64 * larger than the buffer size, so don't decrease this (unless you
65 * want to add such support).
66 */
67#define TSI108_RXBUF_SIZE 1536
68
69#define TSI108_TXRING_LEN 256
70
71#define TSI108_TX_INT_FREQ 64
72
73/* Check the phy status every half a second. */
74#define CHECK_PHY_INTERVAL (HZ/2)
75
76static int tsi108_init_one(struct platform_device *pdev);
77static int tsi108_ether_remove(struct platform_device *pdev);
78
79struct tsi108_prv_data {
80 void __iomem *regs; /* Base of normal regs */
81 void __iomem *phyregs; /* Base of register bank used for PHY access */
82
83 struct net_device *dev;
84 struct napi_struct napi;
85
86 unsigned int phy; /* Index of PHY for this interface */
87 unsigned int irq_num;
88 unsigned int id;
89 unsigned int phy_type;
90
91 struct timer_list timer;/* Timer that triggers the check phy function */
92 unsigned int rxtail; /* Next entry in rxring to read */
93 unsigned int rxhead; /* Next entry in rxring to give a new buffer */
94 unsigned int rxfree; /* Number of free, allocated RX buffers */
95
96 unsigned int rxpending; /* Non-zero if there are still descriptors
97 * to be processed from a previous descriptor
98 * interrupt condition that has been cleared */
99
100 unsigned int txtail; /* Next TX descriptor to check status on */
101 unsigned int txhead; /* Next TX descriptor to use */
102
103 /* Number of free TX descriptors. This could be calculated from
104 * rxhead and rxtail if one descriptor were left unused to disambiguate
105 * full and empty conditions, but it's simpler to just keep track
106 * explicitly. */
107
108 unsigned int txfree;
109
110 unsigned int phy_ok; /* The PHY is currently powered on. */
111
112 /* PHY status (duplex is 1 for half, 2 for full,
113 * so that the default 0 indicates that neither has
114 * yet been configured). */
115
116 unsigned int link_up;
117 unsigned int speed;
118 unsigned int duplex;
119
120 tx_desc *txring;
121 rx_desc *rxring;
122 struct sk_buff *txskbs[TSI108_TXRING_LEN];
123 struct sk_buff *rxskbs[TSI108_RXRING_LEN];
124
125 dma_addr_t txdma, rxdma;
126
127 /* txlock nests in misclock and phy_lock */
128
129 spinlock_t txlock, misclock;
130
131 /* stats is used to hold the upper bits of each hardware counter,
132 * and tmpstats is used to hold the full values for returning
133 * to the caller of get_stats(). They must be separate in case
134 * an overflow interrupt occurs before the stats are consumed.
135 */
136
137 struct net_device_stats stats;
138 struct net_device_stats tmpstats;
139
140 /* These stats are kept separate in hardware, thus require individual
141 * fields for handling carry. They are combined in get_stats.
142 */
143
144 unsigned long rx_fcs; /* Add to rx_frame_errors */
145 unsigned long rx_short_fcs; /* Add to rx_frame_errors */
146 unsigned long rx_long_fcs; /* Add to rx_frame_errors */
147 unsigned long rx_underruns; /* Add to rx_length_errors */
148 unsigned long rx_overruns; /* Add to rx_length_errors */
149
150 unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */
151 unsigned long tx_pause_drop; /* Add to tx_aborted_errors */
152
153 unsigned long mc_hash[16];
154 u32 msg_enable; /* debug message level */
155 struct mii_if_info mii_if;
156 unsigned int init_media;
157};
158
159/* Structure for a device driver */
160
161static struct platform_driver tsi_eth_driver = {
162 .probe = tsi108_init_one,
163 .remove = tsi108_ether_remove,
164 .driver = {
165 .name = "tsi-ethernet",
166 .owner = THIS_MODULE,
167 },
168};
169
170static void tsi108_timed_checker(unsigned long dev_ptr);
171
172static void dump_eth_one(struct net_device *dev)
173{
174 struct tsi108_prv_data *data = netdev_priv(dev);
175
176 printk("Dumping %s...\n", dev->name);
177 printk("intstat %x intmask %x phy_ok %d"
178 " link %d speed %d duplex %d\n",
179 TSI_READ(TSI108_EC_INTSTAT),
180 TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
181 data->link_up, data->speed, data->duplex);
182
183 printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
184 data->txhead, data->txtail, data->txfree,
185 TSI_READ(TSI108_EC_TXSTAT),
186 TSI_READ(TSI108_EC_TXESTAT),
187 TSI_READ(TSI108_EC_TXERR));
188
189 printk("RX: head %d, tail %d, free %d, stat %x,"
190 " estat %x, err %x, pending %d\n\n",
191 data->rxhead, data->rxtail, data->rxfree,
192 TSI_READ(TSI108_EC_RXSTAT),
193 TSI_READ(TSI108_EC_RXESTAT),
194 TSI_READ(TSI108_EC_RXERR), data->rxpending);
195}
196
197/* Synchronization is needed between the thread and up/down events.
198 * Note that the PHY is accessed through the same registers for both
199 * interfaces, so this can't be made interface-specific.
200 */
201
202static DEFINE_SPINLOCK(phy_lock);
203
204static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
205{
206 unsigned i;
207
208 TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
209 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
210 (reg << TSI108_MAC_MII_ADDR_REG));
211 TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
212 TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
213 for (i = 0; i < 100; i++) {
214 if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
215 (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
216 break;
217 udelay(10);
218 }
219
220 if (i == 100)
221 return 0xffff;
222 else
223 return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
224}
225
226static void tsi108_write_mii(struct tsi108_prv_data *data,
227 int reg, u16 val)
228{
229 unsigned i = 100;
230 TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
231 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
232 (reg << TSI108_MAC_MII_ADDR_REG));
233 TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
234 while (i--) {
235 if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
236 TSI108_MAC_MII_IND_BUSY))
237 break;
238 udelay(10);
239 }
240}
241
242static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
243{
244 struct tsi108_prv_data *data = netdev_priv(dev);
245 return tsi108_read_mii(data, reg);
246}
247
248static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
249{
250 struct tsi108_prv_data *data = netdev_priv(dev);
251 tsi108_write_mii(data, reg, val);
252}
253
254static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
255 int reg, u16 val)
256{
257 unsigned i = 1000;
258 TSI_WRITE(TSI108_MAC_MII_ADDR,
259 (0x1e << TSI108_MAC_MII_ADDR_PHY)
260 | (reg << TSI108_MAC_MII_ADDR_REG));
261 TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
262 while(i--) {
263 if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
264 return;
265 udelay(10);
266 }
267 printk(KERN_ERR "%s function time out\n", __func__);
268}
269
270static int mii_speed(struct mii_if_info *mii)
271{
272 int advert, lpa, val, media;
273 int lpa2 = 0;
274 int speed;
275
276 if (!mii_link_ok(mii))
277 return 0;
278
279 val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
280 if ((val & BMSR_ANEGCOMPLETE) == 0)
281 return 0;
282
283 advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
284 lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
285 media = mii_nway_result(advert & lpa);
286
287 if (mii->supports_gmii)
288 lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
289
290 speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
291 (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
292 return speed;
293}
294
295static void tsi108_check_phy(struct net_device *dev)
296{
297 struct tsi108_prv_data *data = netdev_priv(dev);
298 u32 mac_cfg2_reg, portctrl_reg;
299 u32 duplex;
300 u32 speed;
301 unsigned long flags;
302
303 spin_lock_irqsave(&phy_lock, flags);
304
305 if (!data->phy_ok)
306 goto out;
307
308 duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
309 data->init_media = 0;
310
311 if (netif_carrier_ok(dev)) {
312
313 speed = mii_speed(&data->mii_if);
314
315 if ((speed != data->speed) || duplex) {
316
317 mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
318 portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
319
320 mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
321
322 if (speed == 1000) {
323 mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
324 portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
325 } else {
326 mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
327 portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
328 }
329
330 data->speed = speed;
331
332 if (data->mii_if.full_duplex) {
333 mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
334 portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
335 data->duplex = 2;
336 } else {
337 mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
338 portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
339 data->duplex = 1;
340 }
341
342 TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
343 TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
344 }
345
346 if (data->link_up == 0) {
347 /* The manual says it can take 3-4 usecs for the speed change
348 * to take effect.
349 */
350 udelay(5);
351
352 spin_lock(&data->txlock);
353 if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
354 netif_wake_queue(dev);
355
356 data->link_up = 1;
357 spin_unlock(&data->txlock);
358 }
359 } else {
360 if (data->link_up == 1) {
361 netif_stop_queue(dev);
362 data->link_up = 0;
363 printk(KERN_NOTICE "%s : link is down\n", dev->name);
364 }
365
366 goto out;
367 }
368
369
370out:
371 spin_unlock_irqrestore(&phy_lock, flags);
372}
373
374static inline void
375tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
376 unsigned long *upper)
377{
378 if (carry & carry_bit)
379 *upper += carry_shift;
380}
381
382static void tsi108_stat_carry(struct net_device *dev)
383{
384 struct tsi108_prv_data *data = netdev_priv(dev);
385 u32 carry1, carry2;
386
387 spin_lock_irq(&data->misclock);
388
389 carry1 = TSI_READ(TSI108_STAT_CARRY1);
390 carry2 = TSI_READ(TSI108_STAT_CARRY2);
391
392 TSI_WRITE(TSI108_STAT_CARRY1, carry1);
393 TSI_WRITE(TSI108_STAT_CARRY2, carry2);
394
395 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
396 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
397
398 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
399 TSI108_STAT_RXPKTS_CARRY,
400 &data->stats.rx_packets);
401
402 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
403 TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
404
405 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
406 TSI108_STAT_RXMCAST_CARRY,
407 &data->stats.multicast);
408
409 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
410 TSI108_STAT_RXALIGN_CARRY,
411 &data->stats.rx_frame_errors);
412
413 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
414 TSI108_STAT_RXLENGTH_CARRY,
415 &data->stats.rx_length_errors);
416
417 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
418 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
419
420 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
421 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
422
423 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
424 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
425
426 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
427 TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
428
429 tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
430 TSI108_STAT_RXDROP_CARRY,
431 &data->stats.rx_missed_errors);
432
433 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
434 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
435
436 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
437 TSI108_STAT_TXPKTS_CARRY,
438 &data->stats.tx_packets);
439
440 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
441 TSI108_STAT_TXEXDEF_CARRY,
442 &data->stats.tx_aborted_errors);
443
444 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
445 TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
446
447 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
448 TSI108_STAT_TXTCOL_CARRY,
449 &data->stats.collisions);
450
451 tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
452 TSI108_STAT_TXPAUSEDROP_CARRY,
453 &data->tx_pause_drop);
454
455 spin_unlock_irq(&data->misclock);
456}
457
458/* Read a stat counter atomically with respect to carries.
459 * data->misclock must be held.
460 */
461static inline unsigned long
462tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
463 int carry_shift, unsigned long *upper)
464{
465 int carryreg;
466 unsigned long val;
467
468 if (reg < 0xb0)
469 carryreg = TSI108_STAT_CARRY1;
470 else
471 carryreg = TSI108_STAT_CARRY2;
472
473 again:
474 val = TSI_READ(reg) | *upper;
475
476 /* Check to see if it overflowed, but the interrupt hasn't
477 * been serviced yet. If so, handle the carry here, and
478 * try again.
479 */
480
481 if (unlikely(TSI_READ(carryreg) & carry_bit)) {
482 *upper += carry_shift;
483 TSI_WRITE(carryreg, carry_bit);
484 goto again;
485 }
486
487 return val;
488}
489
490static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
491{
492 unsigned long excol;
493
494 struct tsi108_prv_data *data = netdev_priv(dev);
495 spin_lock_irq(&data->misclock);
496
497 data->tmpstats.rx_packets =
498 tsi108_read_stat(data, TSI108_STAT_RXPKTS,
499 TSI108_STAT_CARRY1_RXPKTS,
500 TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
501
502 data->tmpstats.tx_packets =
503 tsi108_read_stat(data, TSI108_STAT_TXPKTS,
504 TSI108_STAT_CARRY2_TXPKTS,
505 TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
506
507 data->tmpstats.rx_bytes =
508 tsi108_read_stat(data, TSI108_STAT_RXBYTES,
509 TSI108_STAT_CARRY1_RXBYTES,
510 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
511
512 data->tmpstats.tx_bytes =
513 tsi108_read_stat(data, TSI108_STAT_TXBYTES,
514 TSI108_STAT_CARRY2_TXBYTES,
515 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
516
517 data->tmpstats.multicast =
518 tsi108_read_stat(data, TSI108_STAT_RXMCAST,
519 TSI108_STAT_CARRY1_RXMCAST,
520 TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
521
522 excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
523 TSI108_STAT_CARRY2_TXEXCOL,
524 TSI108_STAT_TXEXCOL_CARRY,
525 &data->tx_coll_abort);
526
527 data->tmpstats.collisions =
528 tsi108_read_stat(data, TSI108_STAT_TXTCOL,
529 TSI108_STAT_CARRY2_TXTCOL,
530 TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
531
532 data->tmpstats.collisions += excol;
533
534 data->tmpstats.rx_length_errors =
535 tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
536 TSI108_STAT_CARRY1_RXLENGTH,
537 TSI108_STAT_RXLENGTH_CARRY,
538 &data->stats.rx_length_errors);
539
540 data->tmpstats.rx_length_errors +=
541 tsi108_read_stat(data, TSI108_STAT_RXRUNT,
542 TSI108_STAT_CARRY1_RXRUNT,
543 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
544
545 data->tmpstats.rx_length_errors +=
546 tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
547 TSI108_STAT_CARRY1_RXJUMBO,
548 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
549
550 data->tmpstats.rx_frame_errors =
551 tsi108_read_stat(data, TSI108_STAT_RXALIGN,
552 TSI108_STAT_CARRY1_RXALIGN,
553 TSI108_STAT_RXALIGN_CARRY,
554 &data->stats.rx_frame_errors);
555
556 data->tmpstats.rx_frame_errors +=
557 tsi108_read_stat(data, TSI108_STAT_RXFCS,
558 TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
559 &data->rx_fcs);
560
561 data->tmpstats.rx_frame_errors +=
562 tsi108_read_stat(data, TSI108_STAT_RXFRAG,
563 TSI108_STAT_CARRY1_RXFRAG,
564 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
565
566 data->tmpstats.rx_missed_errors =
567 tsi108_read_stat(data, TSI108_STAT_RXDROP,
568 TSI108_STAT_CARRY1_RXDROP,
569 TSI108_STAT_RXDROP_CARRY,
570 &data->stats.rx_missed_errors);
571
572 /* These three are maintained by software. */
573 data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
574 data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
575
576 data->tmpstats.tx_aborted_errors =
577 tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
578 TSI108_STAT_CARRY2_TXEXDEF,
579 TSI108_STAT_TXEXDEF_CARRY,
580 &data->stats.tx_aborted_errors);
581
582 data->tmpstats.tx_aborted_errors +=
583 tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
584 TSI108_STAT_CARRY2_TXPAUSE,
585 TSI108_STAT_TXPAUSEDROP_CARRY,
586 &data->tx_pause_drop);
587
588 data->tmpstats.tx_aborted_errors += excol;
589
590 data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
591 data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
592 data->tmpstats.rx_crc_errors +
593 data->tmpstats.rx_frame_errors +
594 data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
595
596 spin_unlock_irq(&data->misclock);
597 return &data->tmpstats;
598}
599
600static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
601{
602 TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
603 TSI108_EC_RXQ_PTRHIGH_VALID);
604
605 TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
606 | TSI108_EC_RXCTRL_QUEUE0);
607}
608
609static void tsi108_restart_tx(struct tsi108_prv_data * data)
610{
611 TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
612 TSI108_EC_TXQ_PTRHIGH_VALID);
613
614 TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
615 TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
616}
617
618/* txlock must be held by caller, with IRQs disabled, and
619 * with permission to re-enable them when the lock is dropped.
620 */
621static void tsi108_complete_tx(struct net_device *dev)
622{
623 struct tsi108_prv_data *data = netdev_priv(dev);
624 int tx;
625 struct sk_buff *skb;
626 int release = 0;
627
628 while (!data->txfree || data->txhead != data->txtail) {
629 tx = data->txtail;
630
631 if (data->txring[tx].misc & TSI108_TX_OWN)
632 break;
633
634 skb = data->txskbs[tx];
635
636 if (!(data->txring[tx].misc & TSI108_TX_OK))
637 printk("%s: bad tx packet, misc %x\n",
638 dev->name, data->txring[tx].misc);
639
640 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
641 data->txfree++;
642
643 if (data->txring[tx].misc & TSI108_TX_EOF) {
644 dev_kfree_skb_any(skb);
645 release++;
646 }
647 }
648
649 if (release) {
650 if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
651 netif_wake_queue(dev);
652 }
653}
654
655static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
656{
657 struct tsi108_prv_data *data = netdev_priv(dev);
658 int frags = skb_shinfo(skb)->nr_frags + 1;
659 int i;
660
661 if (!data->phy_ok && net_ratelimit())
662 printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
663
664 if (!data->link_up) {
665 printk(KERN_ERR "%s: Transmit while link is down!\n",
666 dev->name);
667 netif_stop_queue(dev);
668 return NETDEV_TX_BUSY;
669 }
670
671 if (data->txfree < MAX_SKB_FRAGS + 1) {
672 netif_stop_queue(dev);
673
674 if (net_ratelimit())
675 printk(KERN_ERR "%s: Transmit with full tx ring!\n",
676 dev->name);
677 return NETDEV_TX_BUSY;
678 }
679
680 if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
681 netif_stop_queue(dev);
682 }
683
684 spin_lock_irq(&data->txlock);
685
686 for (i = 0; i < frags; i++) {
687 int misc = 0;
688 int tx = data->txhead;
689
690 /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
691 * the interrupt bit. TX descriptor-complete interrupts are
692 * enabled when the queue fills up, and masked when there is
693 * still free space. This way, when saturating the outbound
694 * link, the tx interrupts are kept to a reasonable level.
695 * When the queue is not full, reclamation of skbs still occurs
696 * as new packets are transmitted, or on a queue-empty
697 * interrupt.
698 */
699
700 if ((tx % TSI108_TX_INT_FREQ == 0) &&
701 ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
702 misc = TSI108_TX_INT;
703
704 data->txskbs[tx] = skb;
705
706 if (i == 0) {
707 data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
708 skb_headlen(skb), DMA_TO_DEVICE);
709 data->txring[tx].len = skb_headlen(skb);
710 misc |= TSI108_TX_SOF;
711 } else {
712 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
713
714 data->txring[tx].buf0 =
715 dma_map_page(NULL, frag->page, frag->page_offset,
716 frag->size, DMA_TO_DEVICE);
717 data->txring[tx].len = frag->size;
718 }
719
720 if (i == frags - 1)
721 misc |= TSI108_TX_EOF;
722
723 if (netif_msg_pktdata(data)) {
724 int i;
725 printk("%s: Tx Frame contents (%d)\n", dev->name,
726 skb->len);
727 for (i = 0; i < skb->len; i++)
728 printk(" %2.2x", skb->data[i]);
729 printk(".\n");
730 }
731 data->txring[tx].misc = misc | TSI108_TX_OWN;
732
733 data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
734 data->txfree--;
735 }
736
737 tsi108_complete_tx(dev);
738
739 /* This must be done after the check for completed tx descriptors,
740 * so that the tail pointer is correct.
741 */
742
743 if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
744 tsi108_restart_tx(data);
745
746 spin_unlock_irq(&data->txlock);
747 return NETDEV_TX_OK;
748}
749
750static int tsi108_complete_rx(struct net_device *dev, int budget)
751{
752 struct tsi108_prv_data *data = netdev_priv(dev);
753 int done = 0;
754
755 while (data->rxfree && done != budget) {
756 int rx = data->rxtail;
757 struct sk_buff *skb;
758
759 if (data->rxring[rx].misc & TSI108_RX_OWN)
760 break;
761
762 skb = data->rxskbs[rx];
763 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
764 data->rxfree--;
765 done++;
766
767 if (data->rxring[rx].misc & TSI108_RX_BAD) {
768 spin_lock_irq(&data->misclock);
769
770 if (data->rxring[rx].misc & TSI108_RX_CRC)
771 data->stats.rx_crc_errors++;
772 if (data->rxring[rx].misc & TSI108_RX_OVER)
773 data->stats.rx_fifo_errors++;
774
775 spin_unlock_irq(&data->misclock);
776
777 dev_kfree_skb_any(skb);
778 continue;
779 }
780 if (netif_msg_pktdata(data)) {
781 int i;
782 printk("%s: Rx Frame contents (%d)\n",
783 dev->name, data->rxring[rx].len);
784 for (i = 0; i < data->rxring[rx].len; i++)
785 printk(" %2.2x", skb->data[i]);
786 printk(".\n");
787 }
788
789 skb_put(skb, data->rxring[rx].len);
790 skb->protocol = eth_type_trans(skb, dev);
791 netif_receive_skb(skb);
792 }
793
794 return done;
795}
796
797static int tsi108_refill_rx(struct net_device *dev, int budget)
798{
799 struct tsi108_prv_data *data = netdev_priv(dev);
800 int done = 0;
801
802 while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
803 int rx = data->rxhead;
804 struct sk_buff *skb;
805
806 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
807 data->rxskbs[rx] = skb;
808 if (!skb)
809 break;
810
811 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
812 TSI108_RX_SKB_SIZE,
813 DMA_FROM_DEVICE);
814
815 /* Sometimes the hardware sets blen to zero after packet
816 * reception, even though the manual says that it's only ever
817 * modified by the driver.
818 */
819
820 data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
821 data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
822
823 data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
824 data->rxfree++;
825 done++;
826 }
827
828 if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
829 TSI108_EC_RXSTAT_QUEUE0))
830 tsi108_restart_rx(data, dev);
831
832 return done;
833}
834
835static int tsi108_poll(struct napi_struct *napi, int budget)
836{
837 struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
838 struct net_device *dev = data->dev;
839 u32 estat = TSI_READ(TSI108_EC_RXESTAT);
840 u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
841 int num_received = 0, num_filled = 0;
842
843 intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
844 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
845
846 TSI_WRITE(TSI108_EC_RXESTAT, estat);
847 TSI_WRITE(TSI108_EC_INTSTAT, intstat);
848
849 if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
850 num_received = tsi108_complete_rx(dev, budget);
851
852 /* This should normally fill no more slots than the number of
853 * packets received in tsi108_complete_rx(). The exception
854 * is when we previously ran out of memory for RX SKBs. In that
855 * case, it's helpful to obey the budget, not only so that the
856 * CPU isn't hogged, but so that memory (which may still be low)
857 * is not hogged by one device.
858 *
859 * A work unit is considered to be two SKBs to allow us to catch
860 * up when the ring has shrunk due to out-of-memory but we're
861 * still removing the full budget's worth of packets each time.
862 */
863
864 if (data->rxfree < TSI108_RXRING_LEN)
865 num_filled = tsi108_refill_rx(dev, budget * 2);
866
867 if (intstat & TSI108_INT_RXERROR) {
868 u32 err = TSI_READ(TSI108_EC_RXERR);
869 TSI_WRITE(TSI108_EC_RXERR, err);
870
871 if (err) {
872 if (net_ratelimit())
873 printk(KERN_DEBUG "%s: RX error %x\n",
874 dev->name, err);
875
876 if (!(TSI_READ(TSI108_EC_RXSTAT) &
877 TSI108_EC_RXSTAT_QUEUE0))
878 tsi108_restart_rx(data, dev);
879 }
880 }
881
882 if (intstat & TSI108_INT_RXOVERRUN) {
883 spin_lock_irq(&data->misclock);
884 data->stats.rx_fifo_errors++;
885 spin_unlock_irq(&data->misclock);
886 }
887
888 if (num_received < budget) {
889 data->rxpending = 0;
890 napi_complete(napi);
891
892 TSI_WRITE(TSI108_EC_INTMASK,
893 TSI_READ(TSI108_EC_INTMASK)
894 & ~(TSI108_INT_RXQUEUE0
895 | TSI108_INT_RXTHRESH |
896 TSI108_INT_RXOVERRUN |
897 TSI108_INT_RXERROR |
898 TSI108_INT_RXWAIT));
899 } else {
900 data->rxpending = 1;
901 }
902
903 return num_received;
904}
905
906static void tsi108_rx_int(struct net_device *dev)
907{
908 struct tsi108_prv_data *data = netdev_priv(dev);
909
910 /* A race could cause dev to already be scheduled, so it's not an
911 * error if that happens (and interrupts shouldn't be re-masked,
912 * because that can cause harmful races, if poll has already
913 * unmasked them but not cleared LINK_STATE_SCHED).
914 *
915 * This can happen if this code races with tsi108_poll(), which masks
916 * the interrupts after tsi108_irq_one() read the mask, but before
917 * napi_schedule is called. It could also happen due to calls
918 * from tsi108_check_rxring().
919 */
920
921 if (napi_schedule_prep(&data->napi)) {
922 /* Mask, rather than ack, the receive interrupts. The ack
923 * will happen in tsi108_poll().
924 */
925
926 TSI_WRITE(TSI108_EC_INTMASK,
927 TSI_READ(TSI108_EC_INTMASK) |
928 TSI108_INT_RXQUEUE0
929 | TSI108_INT_RXTHRESH |
930 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
931 TSI108_INT_RXWAIT);
932 __napi_schedule(&data->napi);
933 } else {
934 if (!netif_running(dev)) {
935 /* This can happen if an interrupt occurs while the
936 * interface is being brought down, as the START
937 * bit is cleared before the stop function is called.
938 *
939 * In this case, the interrupts must be masked, or
940 * they will continue indefinitely.
941 *
942 * There's a race here if the interface is brought down
943 * and then up in rapid succession, as the device could
944 * be made running after the above check and before
945 * the masking below. This will only happen if the IRQ
 946 * thread has a lower priority than the task bringing
947 * up the interface. Fixing this race would likely
948 * require changes in generic code.
949 */
950
951 TSI_WRITE(TSI108_EC_INTMASK,
952 TSI_READ
953 (TSI108_EC_INTMASK) |
954 TSI108_INT_RXQUEUE0 |
955 TSI108_INT_RXTHRESH |
956 TSI108_INT_RXOVERRUN |
957 TSI108_INT_RXERROR |
958 TSI108_INT_RXWAIT);
959 }
960 }
961}
962
963/* If the RX ring has run out of memory, try periodically
964 * to allocate some more, as otherwise poll would never
965 * get called (apart from the initial end-of-queue condition).
966 *
967 * This is called once per second (by default) from the thread.
968 */
969
970static void tsi108_check_rxring(struct net_device *dev)
971{
972 struct tsi108_prv_data *data = netdev_priv(dev);
973
 974 /* A poll is scheduled, as opposed to calling tsi108_refill_rx
975 * directly, so as to keep the receive path single-threaded
976 * (and thus not needing a lock).
977 */
978
979 if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
980 tsi108_rx_int(dev);
981}
982
983static void tsi108_tx_int(struct net_device *dev)
984{
985 struct tsi108_prv_data *data = netdev_priv(dev);
986 u32 estat = TSI_READ(TSI108_EC_TXESTAT);
987
988 TSI_WRITE(TSI108_EC_TXESTAT, estat);
989 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
990 TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
991 if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
992 u32 err = TSI_READ(TSI108_EC_TXERR);
993 TSI_WRITE(TSI108_EC_TXERR, err);
994
995 if (err && net_ratelimit())
996 printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
997 }
998
999 if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
1000 spin_lock(&data->txlock);
1001 tsi108_complete_tx(dev);
1002 spin_unlock(&data->txlock);
1003 }
1004}
1005
1006
1007static irqreturn_t tsi108_irq(int irq, void *dev_id)
1008{
1009 struct net_device *dev = dev_id;
1010 struct tsi108_prv_data *data = netdev_priv(dev);
1011 u32 stat = TSI_READ(TSI108_EC_INTSTAT);
1012
1013 if (!(stat & TSI108_INT_ANY))
1014 return IRQ_NONE; /* Not our interrupt */
1015
1016 stat &= ~TSI_READ(TSI108_EC_INTMASK);
1017
1018 if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
1019 TSI108_INT_TXERROR))
1020 tsi108_tx_int(dev);
1021 if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
1022 TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
1023 TSI108_INT_RXERROR))
1024 tsi108_rx_int(dev);
1025
1026 if (stat & TSI108_INT_SFN) {
1027 if (net_ratelimit())
1028 printk(KERN_DEBUG "%s: SFN error\n", dev->name);
1029 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
1030 }
1031
1032 if (stat & TSI108_INT_STATCARRY) {
1033 tsi108_stat_carry(dev);
1034 TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
1035 }
1036
1037 return IRQ_HANDLED;
1038}
1039
1040static void tsi108_stop_ethernet(struct net_device *dev)
1041{
1042 struct tsi108_prv_data *data = netdev_priv(dev);
1043 int i = 1000;
1044 /* Disable all TX and RX queues ... */
1045 TSI_WRITE(TSI108_EC_TXCTRL, 0);
1046 TSI_WRITE(TSI108_EC_RXCTRL, 0);
1047
1048 /* ...and wait for them to become idle */
1049 while(i--) {
1050 if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
1051 break;
1052 udelay(10);
1053 }
1054 i = 1000;
1055 while(i--){
1056 if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
1057 return;
1058 udelay(10);
1059 }
1060 printk(KERN_ERR "%s function time out\n", __func__);
1061}
1062
1063static void tsi108_reset_ether(struct tsi108_prv_data * data)
1064{
1065 TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
1066 udelay(100);
1067 TSI_WRITE(TSI108_MAC_CFG1, 0);
1068
1069 TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
1070 udelay(100);
1071 TSI_WRITE(TSI108_EC_PORTCTRL,
1072 TSI_READ(TSI108_EC_PORTCTRL) &
1073 ~TSI108_EC_PORTCTRL_STATRST);
1074
1075 TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
1076 udelay(100);
1077 TSI_WRITE(TSI108_EC_TXCFG,
1078 TSI_READ(TSI108_EC_TXCFG) &
1079 ~TSI108_EC_TXCFG_RST);
1080
1081 TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
1082 udelay(100);
1083 TSI_WRITE(TSI108_EC_RXCFG,
1084 TSI_READ(TSI108_EC_RXCFG) &
1085 ~TSI108_EC_RXCFG_RST);
1086
1087 TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
1088 TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
1089 TSI108_MAC_MII_MGMT_RST);
1090 udelay(100);
1091 TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
1092 (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
1093 ~(TSI108_MAC_MII_MGMT_RST |
1094 TSI108_MAC_MII_MGMT_CLK)) | 0x07);
1095}
1096
1097static int tsi108_get_mac(struct net_device *dev)
1098{
1099 struct tsi108_prv_data *data = netdev_priv(dev);
1100 u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
1101 u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
1102
1103 /* Note that the octets are reversed from what the manual says,
1104 * producing an even weirder ordering...
1105 */
1106 if (word2 == 0 && word1 == 0) {
1107 dev->dev_addr[0] = 0x00;
1108 dev->dev_addr[1] = 0x06;
1109 dev->dev_addr[2] = 0xd2;
1110 dev->dev_addr[3] = 0x00;
1111 dev->dev_addr[4] = 0x00;
1112 if (0x8 == data->phy)
1113 dev->dev_addr[5] = 0x01;
1114 else
1115 dev->dev_addr[5] = 0x02;
1116
1117 word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
1118
1119 word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
1120 (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
1121
1122 TSI_WRITE(TSI108_MAC_ADDR1, word1);
1123 TSI_WRITE(TSI108_MAC_ADDR2, word2);
1124 } else {
1125 dev->dev_addr[0] = (word2 >> 16) & 0xff;
1126 dev->dev_addr[1] = (word2 >> 24) & 0xff;
1127 dev->dev_addr[2] = (word1 >> 0) & 0xff;
1128 dev->dev_addr[3] = (word1 >> 8) & 0xff;
1129 dev->dev_addr[4] = (word1 >> 16) & 0xff;
1130 dev->dev_addr[5] = (word1 >> 24) & 0xff;
1131 }
1132
1133 if (!is_valid_ether_addr(dev->dev_addr)) {
1134 printk(KERN_ERR
1135 "%s: Invalid MAC address. word1: %08x, word2: %08x\n",
1136 dev->name, word1, word2);
1137 return -EINVAL;
1138 }
1139
1140 return 0;
1141}
1142
1143static int tsi108_set_mac(struct net_device *dev, void *addr)
1144{
1145 struct tsi108_prv_data *data = netdev_priv(dev);
1146 u32 word1, word2;
1147 int i;
1148
1149 if (!is_valid_ether_addr(addr))
1150 return -EINVAL;
1151
1152 for (i = 0; i < 6; i++)
1153 /* +2 is for the offset of the HW addr type */
1154 dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
1155
1156 word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
1157
1158 word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
1159 (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
1160
1161 spin_lock_irq(&data->misclock);
1162 TSI_WRITE(TSI108_MAC_ADDR1, word1);
1163 TSI_WRITE(TSI108_MAC_ADDR2, word2);
1164 spin_lock(&data->txlock);
1165
1166 if (data->txfree && data->link_up)
1167 netif_wake_queue(dev);
1168
1169 spin_unlock(&data->txlock);
1170 spin_unlock_irq(&data->misclock);
1171 return 0;
1172}
1173
1174/* Protected by dev->xmit_lock. */
1175static void tsi108_set_rx_mode(struct net_device *dev)
1176{
1177 struct tsi108_prv_data *data = netdev_priv(dev);
1178 u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
1179
1180 if (dev->flags & IFF_PROMISC) {
1181 rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
1182 rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
1183 goto out;
1184 }
1185
1186 rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
1187
1188 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
1189 int i;
1190 struct netdev_hw_addr *ha;
1191 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
1192
1193 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1194
1195 netdev_for_each_mc_addr(ha, dev) {
1196 u32 hash, crc;
1197
1198 crc = ether_crc(6, ha->addr);
1199 hash = crc >> 23;
1200 __set_bit(hash, &data->mc_hash[0]);
1201 }
1202
1203 TSI_WRITE(TSI108_EC_HASHADDR,
1204 TSI108_EC_HASHADDR_AUTOINC |
1205 TSI108_EC_HASHADDR_MCAST);
1206
1207 for (i = 0; i < 16; i++) {
1208 /* The manual says that the hardware may drop
1209 * back-to-back writes to the data register.
1210 */
1211 udelay(1);
1212 TSI_WRITE(TSI108_EC_HASHDATA,
1213 data->mc_hash[i]);
1214 }
1215 }
1216
1217 out:
1218 TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
1219}
1220
1221static void tsi108_init_phy(struct net_device *dev)
1222{
1223 struct tsi108_prv_data *data = netdev_priv(dev);
1224 u32 i = 0;
1225 u16 phyval = 0;
1226 unsigned long flags;
1227
1228 spin_lock_irqsave(&phy_lock, flags);
1229
1230 tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
1231 while (--i) {
1232 if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
1233 break;
1234 udelay(10);
1235 }
1236 if (i == 0)
1237 printk(KERN_ERR "%s function time out\n", __func__);
1238
1239 if (data->phy_type == TSI108_PHY_BCM54XX) {
1240 tsi108_write_mii(data, 0x09, 0x0300);
1241 tsi108_write_mii(data, 0x10, 0x1020);
1242 tsi108_write_mii(data, 0x1c, 0x8c00);
1243 }
1244
1245 tsi108_write_mii(data,
1246 MII_BMCR,
1247 BMCR_ANENABLE | BMCR_ANRESTART);
1248 while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
1249 cpu_relax();
1250
1251 /* Set G/MII mode and receive clock select in TBI control #2. The
1252 * second port won't work if this isn't done, even though we don't
1253 * use TBI mode.
1254 */
1255
1256 tsi108_write_tbi(data, 0x11, 0x30);
1257
1258 /* FIXME: It seems to take more than 2 back-to-back reads to the
1259 * PHY_STAT register before the link up status bit is set.
1260 */
1261
1262 data->link_up = 0;
1263
1264 while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
1265 BMSR_LSTATUS)) {
1266 if (i++ > (MII_READ_DELAY / 10)) {
1267 break;
1268 }
1269 spin_unlock_irqrestore(&phy_lock, flags);
1270 msleep(10);
1271 spin_lock_irqsave(&phy_lock, flags);
1272 }
1273
1274 data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1275 printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
1276 data->phy_ok = 1;
1277 data->init_media = 1;
1278 spin_unlock_irqrestore(&phy_lock, flags);
1279}
1280
1281static void tsi108_kill_phy(struct net_device *dev)
1282{
1283 struct tsi108_prv_data *data = netdev_priv(dev);
1284 unsigned long flags;
1285
1286 spin_lock_irqsave(&phy_lock, flags);
1287 tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
1288 data->phy_ok = 0;
1289 spin_unlock_irqrestore(&phy_lock, flags);
1290}
1291
1292static int tsi108_open(struct net_device *dev)
1293{
1294 int i;
1295 struct tsi108_prv_data *data = netdev_priv(dev);
1296 unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
1297 unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
1298
1299 i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
1300 if (i != 0) {
1301 printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
1302 data->id, data->irq_num);
1303 return i;
1304 } else {
1305 dev->irq = data->irq_num;
1306 printk(KERN_NOTICE
1307 "tsi108_open : Port %d Assigned IRQ %d to %s\n",
1308 data->id, dev->irq, dev->name);
1309 }
1310
1311 data->rxring = dma_alloc_coherent(NULL, rxring_size,
1312 &data->rxdma, GFP_KERNEL);
1313
1314 if (!data->rxring) {
1315 printk(KERN_DEBUG
1316 "TSI108_ETH: failed to allocate memory for rxring!\n");
1317 return -ENOMEM;
1318 } else {
1319 memset(data->rxring, 0, rxring_size);
1320 }
1321
1322 data->txring = dma_alloc_coherent(NULL, txring_size,
1323 &data->txdma, GFP_KERNEL);
1324
1325 if (!data->txring) {
1326 printk(KERN_DEBUG
1327 "TSI108_ETH: failed to allocate memory for txring!\n");
1328 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
1329 return -ENOMEM;
1330 } else {
1331 memset(data->txring, 0, txring_size);
1332 }
1333
1334 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1335 data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
1336 data->rxring[i].blen = TSI108_RXBUF_SIZE;
1337 data->rxring[i].vlan = 0;
1338 }
1339
1340 data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
1341
1342 data->rxtail = 0;
1343 data->rxhead = 0;
1344
1345 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1346 struct sk_buff *skb;
1347
1348 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
1349 if (!skb) {
1350 /* Bah. No memory for now, but maybe we'll get
1351 * some more later.
1352 * For now, we'll live with the smaller ring.
1353 */
1354 printk(KERN_WARNING
1355 "%s: Could only allocate %d receive skb(s).\n",
1356 dev->name, i);
1357 data->rxhead = i;
1358 break;
1359 }
1360
1361 data->rxskbs[i] = skb;
1362 data->rxskbs[i] = skb;
1363 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1364 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1365 }
1366
1367 data->rxfree = i;
1368 TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
1369
1370 for (i = 0; i < TSI108_TXRING_LEN; i++) {
1371 data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
1372 data->txring[i].misc = 0;
1373 }
1374
1375 data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
1376 data->txtail = 0;
1377 data->txhead = 0;
1378 data->txfree = TSI108_TXRING_LEN;
1379 TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
1380 tsi108_init_phy(dev);
1381
1382 napi_enable(&data->napi);
1383
1384 setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
1385 mod_timer(&data->timer, jiffies + 1);
1386
1387 tsi108_restart_rx(data, dev);
1388
1389 TSI_WRITE(TSI108_EC_INTSTAT, ~0);
1390
1391 TSI_WRITE(TSI108_EC_INTMASK,
1392 ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
1393 TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
1394 TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
1395 TSI108_INT_SFN | TSI108_INT_STATCARRY));
1396
1397 TSI_WRITE(TSI108_MAC_CFG1,
1398 TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
1399 netif_start_queue(dev);
1400 return 0;
1401}
1402
1403static int tsi108_close(struct net_device *dev)
1404{
1405 struct tsi108_prv_data *data = netdev_priv(dev);
1406
1407 netif_stop_queue(dev);
1408 napi_disable(&data->napi);
1409
1410 del_timer_sync(&data->timer);
1411
1412 tsi108_stop_ethernet(dev);
1413 tsi108_kill_phy(dev);
1414 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1415 TSI_WRITE(TSI108_MAC_CFG1, 0);
1416
1417 /* Check for any pending TX packets, and drop them. */
1418
1419 while (!data->txfree || data->txhead != data->txtail) {
1420 int tx = data->txtail;
1421 struct sk_buff *skb;
1422 skb = data->txskbs[tx];
1423 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
1424 data->txfree++;
1425 dev_kfree_skb(skb);
1426 }
1427
1428 free_irq(data->irq_num, dev);
1429
1430 /* Discard the RX ring. */
1431
1432 while (data->rxfree) {
1433 int rx = data->rxtail;
1434 struct sk_buff *skb;
1435
1436 skb = data->rxskbs[rx];
1437 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
1438 data->rxfree--;
1439 dev_kfree_skb(skb);
1440 }
1441
1442 dma_free_coherent(0,
1443 TSI108_RXRING_LEN * sizeof(rx_desc),
1444 data->rxring, data->rxdma);
1445 dma_free_coherent(0,
1446 TSI108_TXRING_LEN * sizeof(tx_desc),
1447 data->txring, data->txdma);
1448
1449 return 0;
1450}
1451
1452static void tsi108_init_mac(struct net_device *dev)
1453{
1454 struct tsi108_prv_data *data = netdev_priv(dev);
1455
1456 TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
1457 TSI108_MAC_CFG2_PADCRC);
1458
1459 TSI_WRITE(TSI108_EC_TXTHRESH,
1460 (192 << TSI108_EC_TXTHRESH_STARTFILL) |
1461 (192 << TSI108_EC_TXTHRESH_STOPFILL));
1462
1463 TSI_WRITE(TSI108_STAT_CARRYMASK1,
1464 ~(TSI108_STAT_CARRY1_RXBYTES |
1465 TSI108_STAT_CARRY1_RXPKTS |
1466 TSI108_STAT_CARRY1_RXFCS |
1467 TSI108_STAT_CARRY1_RXMCAST |
1468 TSI108_STAT_CARRY1_RXALIGN |
1469 TSI108_STAT_CARRY1_RXLENGTH |
1470 TSI108_STAT_CARRY1_RXRUNT |
1471 TSI108_STAT_CARRY1_RXJUMBO |
1472 TSI108_STAT_CARRY1_RXFRAG |
1473 TSI108_STAT_CARRY1_RXJABBER |
1474 TSI108_STAT_CARRY1_RXDROP));
1475
1476 TSI_WRITE(TSI108_STAT_CARRYMASK2,
1477 ~(TSI108_STAT_CARRY2_TXBYTES |
1478 TSI108_STAT_CARRY2_TXPKTS |
1479 TSI108_STAT_CARRY2_TXEXDEF |
1480 TSI108_STAT_CARRY2_TXEXCOL |
1481 TSI108_STAT_CARRY2_TXTCOL |
1482 TSI108_STAT_CARRY2_TXPAUSE));
1483
1484 TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
1485 TSI_WRITE(TSI108_MAC_CFG1, 0);
1486
1487 TSI_WRITE(TSI108_EC_RXCFG,
1488 TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
1489
1490 TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
1491 TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
1492 TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
1493 TSI108_EC_TXQ_CFG_SFNPORT));
1494
1495 TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
1496 TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
1497 TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
1498 TSI108_EC_RXQ_CFG_SFNPORT));
1499
1500 TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
1501 TSI108_EC_TXQ_BUFCFG_BURST256 |
1502 TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
1503 TSI108_EC_TXQ_BUFCFG_SFNPORT));
1504
1505 TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
1506 TSI108_EC_RXQ_BUFCFG_BURST256 |
1507 TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
1508 TSI108_EC_RXQ_BUFCFG_SFNPORT));
1509
1510 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1511}
1512
1513static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1514{
1515 struct tsi108_prv_data *data = netdev_priv(dev);
1516 unsigned long flags;
1517 int rc;
1518
1519 spin_lock_irqsave(&data->txlock, flags);
1520 rc = mii_ethtool_gset(&data->mii_if, cmd);
1521 spin_unlock_irqrestore(&data->txlock, flags);
1522
1523 return rc;
1524}
1525
1526static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1527{
1528 struct tsi108_prv_data *data = netdev_priv(dev);
1529 unsigned long flags;
1530 int rc;
1531
1532 spin_lock_irqsave(&data->txlock, flags);
1533 rc = mii_ethtool_sset(&data->mii_if, cmd);
1534 spin_unlock_irqrestore(&data->txlock, flags);
1535
1536 return rc;
1537}
1538
1539static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1540{
1541 struct tsi108_prv_data *data = netdev_priv(dev);
1542 if (!netif_running(dev))
1543 return -EINVAL;
1544 return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
1545}
1546
1547static const struct ethtool_ops tsi108_ethtool_ops = {
1548 .get_link = ethtool_op_get_link,
1549 .get_settings = tsi108_get_settings,
1550 .set_settings = tsi108_set_settings,
1551};
1552
1553static const struct net_device_ops tsi108_netdev_ops = {
1554 .ndo_open = tsi108_open,
1555 .ndo_stop = tsi108_close,
1556 .ndo_start_xmit = tsi108_send_packet,
1557 .ndo_set_multicast_list = tsi108_set_rx_mode,
1558 .ndo_get_stats = tsi108_get_stats,
1559 .ndo_do_ioctl = tsi108_do_ioctl,
1560 .ndo_set_mac_address = tsi108_set_mac,
1561 .ndo_validate_addr = eth_validate_addr,
1562 .ndo_change_mtu = eth_change_mtu,
1563};
1564
1565static int
1566tsi108_init_one(struct platform_device *pdev)
1567{
1568 struct net_device *dev = NULL;
1569 struct tsi108_prv_data *data = NULL;
1570 hw_info *einfo;
1571 int err = 0;
1572
1573 einfo = pdev->dev.platform_data;
1574
1575 if (NULL == einfo) {
1576 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
1577 pdev->id);
1578 return -ENODEV;
1579 }
1580
1581 /* Create an ethernet device instance */
1582
1583 dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
1584 if (!dev) {
1585 printk("tsi108_eth: Could not allocate a device structure\n");
1586 return -ENOMEM;
1587 }
1588
1589 printk("tsi108_eth%d: probe...\n", pdev->id);
1590 data = netdev_priv(dev);
1591 data->dev = dev;
1592
 1593 pr_debug("tsi108_eth%d:regs:phyregs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
1594 pdev->id, einfo->regs, einfo->phyregs,
1595 einfo->phy, einfo->irq_num);
1596
1597 data->regs = ioremap(einfo->regs, 0x400);
1598 if (NULL == data->regs) {
1599 err = -ENOMEM;
1600 goto regs_fail;
1601 }
1602
1603 data->phyregs = ioremap(einfo->phyregs, 0x400);
1604 if (NULL == data->phyregs) {
1605 err = -ENOMEM;
1606 goto regs_fail;
1607 }
1608/* MII setup */
1609 data->mii_if.dev = dev;
1610 data->mii_if.mdio_read = tsi108_mdio_read;
1611 data->mii_if.mdio_write = tsi108_mdio_write;
1612 data->mii_if.phy_id = einfo->phy;
1613 data->mii_if.phy_id_mask = 0x1f;
1614 data->mii_if.reg_num_mask = 0x1f;
1615
1616 data->phy = einfo->phy;
1617 data->phy_type = einfo->phy_type;
1618 data->irq_num = einfo->irq_num;
1619 data->id = pdev->id;
1620 netif_napi_add(dev, &data->napi, tsi108_poll, 64);
1621 dev->netdev_ops = &tsi108_netdev_ops;
1622 dev->ethtool_ops = &tsi108_ethtool_ops;
1623
1624 /* Apparently, the Linux networking code won't use scatter-gather
1625 * if the hardware doesn't do checksums. However, it's faster
1626 * to checksum in place and use SG, as (among other reasons)
1627 * the cache won't be dirtied (which then has to be flushed
1628 * before DMA). The checksumming is done by the driver (via
1629 * a new function skb_csum_dev() in net/core/skbuff.c).
1630 */
1631
1632 dev->features = NETIF_F_HIGHDMA;
1633
1634 spin_lock_init(&data->txlock);
1635 spin_lock_init(&data->misclock);
1636
1637 tsi108_reset_ether(data);
1638 tsi108_kill_phy(dev);
1639
1640 if ((err = tsi108_get_mac(dev)) != 0) {
1641 printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n",
1642 dev->name);
1643 goto register_fail;
1644 }
1645
1646 tsi108_init_mac(dev);
1647 err = register_netdev(dev);
1648 if (err) {
1649 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
1650 dev->name);
1651 goto register_fail;
1652 }
1653
1654 platform_set_drvdata(pdev, dev);
1655 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n",
1656 dev->name, dev->dev_addr);
1657#ifdef DEBUG
1658 data->msg_enable = DEBUG;
1659 dump_eth_one(dev);
1660#endif
1661
1662 return 0;
1663
1664register_fail:
1665 iounmap(data->regs);
1666 iounmap(data->phyregs);
1667
1668regs_fail:
1669 free_netdev(dev);
1670 return err;
1671}
1672
1673/* There's no way to either get interrupts from the PHY when
1674 * something changes, or to have the Tsi108 automatically communicate
1675 * with the PHY to reconfigure itself.
1676 *
1677 * Thus, we have to do it using a timer.
1678 */
1679
1680static void tsi108_timed_checker(unsigned long dev_ptr)
1681{
1682 struct net_device *dev = (struct net_device *)dev_ptr;
1683 struct tsi108_prv_data *data = netdev_priv(dev);
1684
1685 tsi108_check_phy(dev);
1686 tsi108_check_rxring(dev);
1687 mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
1688}
1689
1690static int tsi108_ether_init(void)
1691{
1692 int ret;
1693 ret = platform_driver_register (&tsi_eth_driver);
1694 if (ret < 0){
1695 printk("tsi108_ether_init: error initializing ethernet "
1696 "device\n");
1697 return ret;
1698 }
1699 return 0;
1700}
1701
1702static int tsi108_ether_remove(struct platform_device *pdev)
1703{
1704 struct net_device *dev = platform_get_drvdata(pdev);
1705 struct tsi108_prv_data *priv = netdev_priv(dev);
1706
1707 unregister_netdev(dev);
1708 tsi108_stop_ethernet(dev);
1709 platform_set_drvdata(pdev, NULL);
1710 iounmap(priv->regs);
1711 iounmap(priv->phyregs);
1712 free_netdev(dev);
1713
1714 return 0;
1715}
1716static void tsi108_ether_exit(void)
1717{
1718 platform_driver_unregister(&tsi_eth_driver);
1719}
1720
1721module_init(tsi108_ether_init);
1722module_exit(tsi108_ether_exit);
1723
1724MODULE_AUTHOR("Tundra Semiconductor Corporation");
1725MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
1726MODULE_LICENSE("GPL");
1727MODULE_ALIAS("platform:tsi-ethernet");
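tsi108_init_one() above reads the register windows, PHY address, interrupt number and PHY type from pdev->dev.platform_data, which it treats as a hw_info structure from <asm/tsi108.h>. A board-side sketch of such a registration, using hypothetical base addresses and IRQ number, might look like:

	/* Sketch only: the addresses and IRQ below are placeholders, and the
	 * authoritative hw_info layout lives in <asm/tsi108.h>; the board file
	 * would also include <linux/platform_device.h>.
	 */
	static hw_info tsi108_eth0_data = {
		.regs     = 0xc0006000,         /* hypothetical GIGE port register base */
		.phyregs  = 0xc0006400,         /* hypothetical MII management base */
		.phy      = 0x08,               /* PHY address on the MDIO bus */
		.irq_num  = 16,                 /* hypothetical interrupt line */
		.phy_type = TSI108_PHY_BCM54XX, /* as tested for in tsi108_init_phy() */
	};

	static struct platform_device tsi108_eth0_device = {
		.name = "tsi-ethernet",         /* must match tsi_eth_driver.driver.name */
		.id   = 0,
		.dev  = {
			.platform_data = &tsi108_eth0_data,
		},
	};

The board code would then call platform_device_register(&tsi108_eth0_device) before this driver is probed; on existing Tsi108-based powerpc boards that registration is normally generated from the device tree by the arch setup code rather than written by hand.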
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
new file mode 100644
index 000000000000..5fee7d78dc6d
--- /dev/null
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -0,0 +1,356 @@
1/*
2 * (C) Copyright 2005 Tundra Semiconductor Corp.
3 * Kong Lai, <kong.lai@tundra.com).
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24/*
25 * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
26 */
27
28#ifndef __TSI108_ETH_H
29#define __TSI108_ETH_H
30
31#include <linux/types.h>
32
33#define TSI_WRITE(offset, val) \
34 out_be32((data->regs + (offset)), val)
35
36#define TSI_READ(offset) \
37 in_be32((data->regs + (offset)))
38
39#define TSI_WRITE_PHY(offset, val) \
40 out_be32((data->phyregs + (offset)), val)
41
42#define TSI_READ_PHY(offset) \
43 in_be32((data->phyregs + (offset)))
44
45/*
46 * TSI108 GIGE port registers
47 */
48
49#define TSI108_ETH_PORT_NUM 2
50#define TSI108_PBM_PORT 2
51#define TSI108_SDRAM_PORT 4
52
53#define TSI108_MAC_CFG1 (0x000)
54#define TSI108_MAC_CFG1_SOFTRST (1 << 31)
55#define TSI108_MAC_CFG1_LOOPBACK (1 << 8)
56#define TSI108_MAC_CFG1_RXEN (1 << 2)
57#define TSI108_MAC_CFG1_TXEN (1 << 0)
58
59#define TSI108_MAC_CFG2 (0x004)
60#define TSI108_MAC_CFG2_DFLT_PREAMBLE (7 << 12)
61#define TSI108_MAC_CFG2_IFACE_MASK (3 << 8)
62#define TSI108_MAC_CFG2_NOGIG (1 << 8)
63#define TSI108_MAC_CFG2_GIG (2 << 8)
64#define TSI108_MAC_CFG2_PADCRC (1 << 2)
65#define TSI108_MAC_CFG2_FULLDUPLEX (1 << 0)
66
67#define TSI108_MAC_MII_MGMT_CFG (0x020)
68#define TSI108_MAC_MII_MGMT_CLK (7 << 0)
69#define TSI108_MAC_MII_MGMT_RST (1 << 31)
70
71#define TSI108_MAC_MII_CMD (0x024)
72#define TSI108_MAC_MII_CMD_READ (1 << 0)
73
74#define TSI108_MAC_MII_ADDR (0x028)
75#define TSI108_MAC_MII_ADDR_REG 0
76#define TSI108_MAC_MII_ADDR_PHY 8
77
78#define TSI108_MAC_MII_DATAOUT (0x02c)
79#define TSI108_MAC_MII_DATAIN (0x030)
80
81#define TSI108_MAC_MII_IND (0x034)
82#define TSI108_MAC_MII_IND_NOTVALID (1 << 2)
83#define TSI108_MAC_MII_IND_SCANNING (1 << 1)
84#define TSI108_MAC_MII_IND_BUSY (1 << 0)
85
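/*
 * Editorial sketch, not part of this patch, of an MII read built from the
 * bits above: load the PHY/register address, pulse the READ command, poll
 * the indication register until the interface is idle and the data is valid,
 * then pick up the result from DATAIN.  The real routine in the .c file adds
 * locking and a bounded timeout; "data->phy" is assumed to hold the PHY
 * address.
 */
#if 0
static u16 example_mii_read(struct tsi108_prv_data *data, int reg)
{
	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
		      (data->phy << TSI108_MAC_MII_ADDR_PHY) |
		      (reg << TSI108_MAC_MII_ADDR_REG));
	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);

	/* Busy-wait until the management interface reports valid data. */
	while (TSI_READ_PHY(TSI108_MAC_MII_IND) &
	       (TSI108_MAC_MII_IND_BUSY | TSI108_MAC_MII_IND_NOTVALID))
		cpu_relax();

	return (u16)TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
}
#endif
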
86#define TSI108_MAC_IFCTRL (0x038)
87#define TSI108_MAC_IFCTRL_PHYMODE (1 << 24)
88
89#define TSI108_MAC_ADDR1 (0x040)
90#define TSI108_MAC_ADDR2 (0x044)
91
92#define TSI108_STAT_RXBYTES (0x06c)
93#define TSI108_STAT_RXBYTES_CARRY (1 << 24)
94
95#define TSI108_STAT_RXPKTS (0x070)
96#define TSI108_STAT_RXPKTS_CARRY (1 << 18)
97
98#define TSI108_STAT_RXFCS (0x074)
99#define TSI108_STAT_RXFCS_CARRY (1 << 12)
100
101#define TSI108_STAT_RXMCAST (0x078)
102#define TSI108_STAT_RXMCAST_CARRY (1 << 18)
103
104#define TSI108_STAT_RXALIGN (0x08c)
105#define TSI108_STAT_RXALIGN_CARRY (1 << 12)
106
107#define TSI108_STAT_RXLENGTH (0x090)
108#define TSI108_STAT_RXLENGTH_CARRY (1 << 12)
109
110#define TSI108_STAT_RXRUNT (0x09c)
111#define TSI108_STAT_RXRUNT_CARRY (1 << 12)
112
113#define TSI108_STAT_RXJUMBO (0x0a0)
114#define TSI108_STAT_RXJUMBO_CARRY (1 << 12)
115
116#define TSI108_STAT_RXFRAG (0x0a4)
117#define TSI108_STAT_RXFRAG_CARRY (1 << 12)
118
119#define TSI108_STAT_RXJABBER (0x0a8)
120#define TSI108_STAT_RXJABBER_CARRY (1 << 12)
121
122#define TSI108_STAT_RXDROP (0x0ac)
123#define TSI108_STAT_RXDROP_CARRY (1 << 12)
124
125#define TSI108_STAT_TXBYTES (0x0b0)
126#define TSI108_STAT_TXBYTES_CARRY (1 << 24)
127
128#define TSI108_STAT_TXPKTS (0x0b4)
129#define TSI108_STAT_TXPKTS_CARRY (1 << 18)
130
131#define TSI108_STAT_TXEXDEF (0x0c8)
132#define TSI108_STAT_TXEXDEF_CARRY (1 << 12)
133
134#define TSI108_STAT_TXEXCOL (0x0d8)
135#define TSI108_STAT_TXEXCOL_CARRY (1 << 12)
136
137#define TSI108_STAT_TXTCOL (0x0dc)
138#define TSI108_STAT_TXTCOL_CARRY (1 << 13)
139
140#define TSI108_STAT_TXPAUSEDROP (0x0e4)
141#define TSI108_STAT_TXPAUSEDROP_CARRY (1 << 12)
142
143#define TSI108_STAT_CARRY1 (0x100)
144#define TSI108_STAT_CARRY1_RXBYTES (1 << 16)
145#define TSI108_STAT_CARRY1_RXPKTS (1 << 15)
146#define TSI108_STAT_CARRY1_RXFCS (1 << 14)
147#define TSI108_STAT_CARRY1_RXMCAST (1 << 13)
148#define TSI108_STAT_CARRY1_RXALIGN (1 << 8)
149#define TSI108_STAT_CARRY1_RXLENGTH (1 << 7)
150#define TSI108_STAT_CARRY1_RXRUNT (1 << 4)
151#define TSI108_STAT_CARRY1_RXJUMBO (1 << 3)
152#define TSI108_STAT_CARRY1_RXFRAG (1 << 2)
153#define TSI108_STAT_CARRY1_RXJABBER (1 << 1)
154#define TSI108_STAT_CARRY1_RXDROP (1 << 0)
155
156#define TSI108_STAT_CARRY2 (0x104)
157#define TSI108_STAT_CARRY2_TXBYTES (1 << 13)
158#define TSI108_STAT_CARRY2_TXPKTS (1 << 12)
159#define TSI108_STAT_CARRY2_TXEXDEF (1 << 7)
160#define TSI108_STAT_CARRY2_TXEXCOL (1 << 3)
161#define TSI108_STAT_CARRY2_TXTCOL (1 << 2)
162#define TSI108_STAT_CARRY2_TXPAUSE (1 << 0)
163
164#define TSI108_STAT_CARRYMASK1 (0x108)
165#define TSI108_STAT_CARRYMASK2 (0x10c)
166
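/*
 * Editorial sketch, not part of this patch, of carry handling: each hardware
 * counter is narrower than 32 bits, and when it wraps the corresponding bit
 * in STAT_CARRY1/2 is set (raising TSI108_INT_STATCARRY unless masked via
 * CARRYMASK1/2).  Software then adds the counter's wrap value -- the *_CARRY
 * constants above -- to its own wider accumulator, e.g. for RX byte counts:
 */
#if 0
static void example_fold_rx_bytes_carry(struct tsi108_prv_data *data,
					unsigned long *rx_bytes_hi)
{
	u32 carry1 = TSI_READ(TSI108_STAT_CARRY1);

	/* Write the read value back to clear the set bits, as the driver does. */
	TSI_WRITE(TSI108_STAT_CARRY1, carry1);

	if (carry1 & TSI108_STAT_CARRY1_RXBYTES)
		*rx_bytes_hi += TSI108_STAT_RXBYTES_CARRY;
}
#endif
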
167#define TSI108_EC_PORTCTRL (0x200)
168#define TSI108_EC_PORTCTRL_STATRST (1 << 31)
169#define TSI108_EC_PORTCTRL_STATEN (1 << 28)
170#define TSI108_EC_PORTCTRL_NOGIG (1 << 18)
171#define TSI108_EC_PORTCTRL_HALFDUPLEX (1 << 16)
172
173#define TSI108_EC_INTSTAT (0x204)
174#define TSI108_EC_INTMASK (0x208)
175
176#define TSI108_INT_ANY (1 << 31)
177#define TSI108_INT_SFN (1 << 30)
178#define TSI108_INT_RXIDLE (1 << 29)
179#define TSI108_INT_RXABORT (1 << 28)
180#define TSI108_INT_RXERROR (1 << 27)
181#define TSI108_INT_RXOVERRUN (1 << 26)
182#define TSI108_INT_RXTHRESH (1 << 25)
183#define TSI108_INT_RXWAIT (1 << 24)
184#define TSI108_INT_RXQUEUE0 (1 << 16)
185#define TSI108_INT_STATCARRY (1 << 15)
186#define TSI108_INT_TXIDLE (1 << 13)
187#define TSI108_INT_TXABORT (1 << 12)
188#define TSI108_INT_TXERROR (1 << 11)
189#define TSI108_INT_TXUNDERRUN (1 << 10)
190#define TSI108_INT_TXTHRESH (1 << 9)
191#define TSI108_INT_TXWAIT (1 << 8)
192#define TSI108_INT_TXQUEUE0 (1 << 0)
193
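/*
 * Editorial sketch, not part of this patch, of how the interrupt bits are
 * typically consumed: read EC_INTSTAT, return IRQ_NONE when TSI108_INT_ANY
 * is clear (the line may be shared), drop sources disabled in EC_INTMASK,
 * then dispatch.  The driver's real handler also defers receive/transmit
 * work and deals with the abort/error bits.
 */
#if 0
static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 stat = TSI_READ(TSI108_EC_INTSTAT);

	if (!(stat & TSI108_INT_ANY))
		return IRQ_NONE;		/* not this device */

	stat &= ~TSI_READ(TSI108_EC_INTMASK);	/* ignore masked sources */

	if (stat & TSI108_INT_STATCARRY) {
		/* fold counter carries, then acknowledge the source */
		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
	}

	return IRQ_HANDLED;
}
#endif
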
194#define TSI108_EC_TXCFG (0x220)
195#define TSI108_EC_TXCFG_RST (1 << 31)
196
197#define TSI108_EC_TXCTRL (0x224)
198#define TSI108_EC_TXCTRL_IDLEINT (1 << 31)
199#define TSI108_EC_TXCTRL_ABORT (1 << 30)
200#define TSI108_EC_TXCTRL_GO (1 << 15)
201#define TSI108_EC_TXCTRL_QUEUE0 (1 << 0)
202
203#define TSI108_EC_TXSTAT (0x228)
204#define TSI108_EC_TXSTAT_ACTIVE (1 << 15)
205#define TSI108_EC_TXSTAT_QUEUE0 (1 << 0)
206
207#define TSI108_EC_TXESTAT (0x22c)
208#define TSI108_EC_TXESTAT_Q0_ERR (1 << 24)
209#define TSI108_EC_TXESTAT_Q0_DESCINT (1 << 16)
210#define TSI108_EC_TXESTAT_Q0_EOF (1 << 8)
211#define TSI108_EC_TXESTAT_Q0_EOQ (1 << 0)
212
213#define TSI108_EC_TXERR (0x278)
214
215#define TSI108_EC_TXQ_CFG (0x280)
216#define TSI108_EC_TXQ_CFG_DESC_INT (1 << 20)
217#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT (1 << 19)
218#define TSI108_EC_TXQ_CFG_WSWP (1 << 11)
219#define TSI108_EC_TXQ_CFG_BSWP (1 << 10)
220#define TSI108_EC_TXQ_CFG_SFNPORT 0
221
222#define TSI108_EC_TXQ_BUFCFG (0x284)
223#define TSI108_EC_TXQ_BUFCFG_BURST8 (0 << 8)
224#define TSI108_EC_TXQ_BUFCFG_BURST32 (1 << 8)
225#define TSI108_EC_TXQ_BUFCFG_BURST128 (2 << 8)
226#define TSI108_EC_TXQ_BUFCFG_BURST256 (3 << 8)
227#define TSI108_EC_TXQ_BUFCFG_WSWP (1 << 11)
228#define TSI108_EC_TXQ_BUFCFG_BSWP (1 << 10)
229#define TSI108_EC_TXQ_BUFCFG_SFNPORT 0
230
231#define TSI108_EC_TXQ_PTRLOW (0x288)
232
233#define TSI108_EC_TXQ_PTRHIGH (0x28c)
234#define TSI108_EC_TXQ_PTRHIGH_VALID (1 << 31)
235
236#define TSI108_EC_TXTHRESH (0x230)
237#define TSI108_EC_TXTHRESH_STARTFILL 0
238#define TSI108_EC_TXTHRESH_STOPFILL 16
239
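/*
 * Editorial sketch, not part of this patch: one plausible transmit-queue
 * bring-up suggested by these registers -- point the hardware at the
 * descriptor ring, configure queue 0 for descriptor and end-of-queue
 * interrupts, then set GO.  The driver's actual open/restart paths differ in
 * detail; "txdma" is assumed to be the DMA address of the ring.
 */
#if 0
static void example_start_tx(struct tsi108_prv_data *data)
{
	TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
	TSI_WRITE(TSI108_EC_TXQ_CFG,
		  TSI108_EC_TXQ_CFG_DESC_INT | TSI108_EC_TXQ_CFG_EOQ_OWN_INT);
		  /* plus TSI108_EC_TXQ_CFG_WSWP/BSWP if the bus needs swapping */
	TSI_WRITE(TSI108_EC_TXCTRL,
		  TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
}
#endif
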
240#define TSI108_EC_RXCFG (0x320)
241#define TSI108_EC_RXCFG_RST (1 << 31)
242
243#define TSI108_EC_RXSTAT (0x328)
244#define TSI108_EC_RXSTAT_ACTIVE (1 << 15)
245#define TSI108_EC_RXSTAT_QUEUE0 (1 << 0)
246
247#define TSI108_EC_RXESTAT (0x32c)
248#define TSI108_EC_RXESTAT_Q0_ERR (1 << 24)
249#define TSI108_EC_RXESTAT_Q0_DESCINT (1 << 16)
250#define TSI108_EC_RXESTAT_Q0_EOF (1 << 8)
251#define TSI108_EC_RXESTAT_Q0_EOQ (1 << 0)
252
253#define TSI108_EC_HASHADDR (0x360)
254#define TSI108_EC_HASHADDR_AUTOINC (1 << 31)
255#define TSI108_EC_HASHADDR_DO1STREAD (1 << 30)
256#define TSI108_EC_HASHADDR_UNICAST (0 << 4)
257#define TSI108_EC_HASHADDR_MCAST (1 << 4)
258
259#define TSI108_EC_HASHDATA (0x364)
260
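/*
 * Editorial sketch, not part of this patch, of loading the multicast hash
 * table through the indirect HASHADDR/HASHDATA pair: enable auto-increment,
 * select the multicast table, then write the hash words in order.  The
 * driver spaces the writes out with a small delay because the hardware may
 * drop back-to-back accesses to the data register.
 */
#if 0
static void example_load_mc_hash(struct tsi108_prv_data *data,
				 const u32 *hash, int nwords)
{
	int i;

	TSI_WRITE(TSI108_EC_HASHADDR,
		  TSI108_EC_HASHADDR_AUTOINC | TSI108_EC_HASHADDR_MCAST);

	for (i = 0; i < nwords; i++) {
		udelay(1);
		TSI_WRITE(TSI108_EC_HASHDATA, hash[i]);
	}
}
#endif
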
261#define TSI108_EC_RXQ_PTRLOW (0x388)
262
263#define TSI108_EC_RXQ_PTRHIGH (0x38c)
264#define TSI108_EC_RXQ_PTRHIGH_VALID (1 << 31)
265
266/* Station Enable -- accept packets destined for us */
267#define TSI108_EC_RXCFG_SE (1 << 13)
268/* Unicast Frame Enable -- for packets not destined for us */
269#define TSI108_EC_RXCFG_UFE (1 << 12)
270/* Multicast Frame Enable */
271#define TSI108_EC_RXCFG_MFE (1 << 11)
272/* Broadcast Frame Enable */
273#define TSI108_EC_RXCFG_BFE (1 << 10)
274#define TSI108_EC_RXCFG_UC_HASH (1 << 9)
275#define TSI108_EC_RXCFG_MC_HASH (1 << 8)
276
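/*
 * Editorial sketch, not part of this patch, of how the RXCFG filter bits
 * combine: normal operation accepts frames for the station address plus
 * broadcasts (SE | BFE) and filters multicast through the hash table
 * (MFE | MC_HASH); promiscuous mode accepts all unicast and multicast
 * frames directly, with the hash lookups disabled.
 */
#if 0
static void example_set_promisc(struct tsi108_prv_data *data, int promisc)
{
	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);

	if (promisc) {
		/* accept everything: all unicast and multicast, no hashing */
		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
	} else {
		/* station + broadcast, multicast filtered via the hash table */
		rxcfg &= ~TSI108_EC_RXCFG_UFE;
		rxcfg |= TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE |
			 TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
	}

	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
}
#endif
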
277#define TSI108_EC_RXQ_CFG (0x380)
278#define TSI108_EC_RXQ_CFG_DESC_INT (1 << 20)
279#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT (1 << 19)
280#define TSI108_EC_RXQ_CFG_WSWP (1 << 11)
281#define TSI108_EC_RXQ_CFG_BSWP (1 << 10)
282#define TSI108_EC_RXQ_CFG_SFNPORT 0
283
284#define TSI108_EC_RXQ_BUFCFG (0x384)
285#define TSI108_EC_RXQ_BUFCFG_BURST8 (0 << 8)
286#define TSI108_EC_RXQ_BUFCFG_BURST32 (1 << 8)
287#define TSI108_EC_RXQ_BUFCFG_BURST128 (2 << 8)
288#define TSI108_EC_RXQ_BUFCFG_BURST256 (3 << 8)
289#define TSI108_EC_RXQ_BUFCFG_WSWP (1 << 11)
290#define TSI108_EC_RXQ_BUFCFG_BSWP (1 << 10)
291#define TSI108_EC_RXQ_BUFCFG_SFNPORT 0
292
293#define TSI108_EC_RXCTRL (0x324)
294#define TSI108_EC_RXCTRL_ABORT (1 << 30)
295#define TSI108_EC_RXCTRL_GO (1 << 15)
296#define TSI108_EC_RXCTRL_QUEUE0 (1 << 0)
297
298#define TSI108_EC_RXERR (0x378)
299
300#define TSI108_TX_EOF (1 << 0) /* End of frame; last fragment of packet */
301#define TSI108_TX_SOF (1 << 1) /* Start of frame; first frag. of packet */
302#define TSI108_TX_VLAN (1 << 2) /* Per-frame VLAN: enables VLAN override */
303#define TSI108_TX_HUGE (1 << 3) /* Huge frame enable */
304#define TSI108_TX_PAD (1 << 4) /* Pad the packet if too short */
305#define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */
306#define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */
307#define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. of retries */
308#define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */
309#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */
310#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */
311#define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */
312#define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */
313#define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */
314
315/* Note: the descriptor layouts assume big-endian byte order. */
316typedef struct {
317 u32 buf0;
318 u32 buf1; /* Base address of buffer */
319 u32 next0; /* Address of next descriptor, if any */
320 u32 next1;
321 u16 vlan; /* VLAN, if override enabled for this packet */
322 u16 len; /* Length of buffer in bytes */
323 u32 misc; /* See TSI108_TX_* above */
324 u32 reserved0; /* reserved0 and reserved1 are added to make the desc */
325 u32 reserved1; /* 32-byte aligned */
326} __attribute__ ((aligned(32))) tx_desc;
327
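/*
 * Editorial sketch, not part of this patch, of describing a single-fragment
 * packet to the MAC.  buf0/buf1 (and next0/next1) form 64-bit bus-address
 * pairs; a 32-bit DMA mapping goes in buf0 here.  TSI108_TX_OWN hands the
 * descriptor to the hardware, so it is written together with the other flags
 * only once the rest of the descriptor is filled in.
 */
#if 0
static void example_fill_tx_desc(tx_desc *d, dma_addr_t buf, u16 len)
{
	d->buf0 = buf;			/* bus address of the packet data */
	d->len  = len;			/* bytes to send from this buffer */
	d->vlan = 0;			/* no per-frame VLAN override */
	d->misc = TSI108_TX_SOF | TSI108_TX_EOF |	/* whole packet, one frag */
		  TSI108_TX_PAD | TSI108_TX_CRC |	/* let the MAC pad and CRC */
		  TSI108_TX_INT | TSI108_TX_OWN;	/* interrupt on completion */
}
#endif
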
328#define TSI108_RX_EOF (1 << 0) /* End of frame; last fragment of packet */
329#define TSI108_RX_SOF (1 << 1) /* Start of frame; first frag. of packet */
330#define TSI108_RX_VLAN (1 << 2) /* Set on SOF if packet has a VLAN */
331#define TSI108_RX_FTYPE (1 << 3) /* Length/Type field is type, not length */
332#define TSI108_RX_RUNT (1 << 4) /* Packet is less than minimum size */
333#define TSI108_RX_HASH (1 << 7) /* Hash table match */
334#define TSI108_RX_BAD (1 << 8) /* Bad frame */
335#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */
336#define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */
337#define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */
338#define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */
339#define TSI108_RX_OWN (1 << 15) /* Set if the device owns the descriptor */
340
341#define TSI108_RX_SKB_SIZE 1536 /* The RX skb length */
342
343typedef struct {
344 u32 buf0; /* Base address of buffer */
345 u32 buf1; /* Base address of buffer */
346 u32 next0; /* Address of next descriptor, if any */
347 u32 next1; /* Address of next descriptor, if any */
348 u16 vlan; /* VLAN of received packet, first frag only */
349 u16 len; /* Length of received fragment in bytes */
350 u16 blen; /* Length of buffer in bytes */
351 u16 misc; /* See TSI108_RX_* above */
352 u32 reserved0; /* reserved0 and reserved1 are added to make the desc */
353 u32 reserved1; /* 32-byte aligned */
354} __attribute__ ((aligned(32))) rx_desc;
355
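/*
 * Editorial sketch, not part of this patch, of posting one receive buffer,
 * along the lines of the refill path in the .c file: record the DMA address
 * of a freshly mapped TSI108_RX_SKB_SIZE buffer, tell the MAC how much it
 * may write (blen), and hand over ownership with an interrupt requested.
 */
#if 0
static void example_post_rx_desc(rx_desc *d, dma_addr_t buf)
{
	d->buf0 = buf;			/* bus address of the receive buffer */
	d->blen = TSI108_RX_SKB_SIZE;	/* capacity; hardware fills in "len" */
	d->vlan = 0;
	d->misc = TSI108_RX_OWN | TSI108_RX_INT;
}
#endif
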
356#endif /* __TSI108_ETH_H */