-rw-r--r--  drivers/net/Kconfig  |   10
-rw-r--r--  drivers/net/Makefile |    1
-rw-r--r--  drivers/net/sis190.c | 1359
3 files changed, 1370 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8a835eb58808..765fbb29d386 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1921,6 +1921,16 @@ config R8169_VLAN
 
 	  If in doubt, say Y.
 
+config SIS190
+	tristate "SiS190 gigabit ethernet support"
+	depends on PCI
+	select CRC32
+	---help---
+	  Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sis190.  This is recommended.
+
 config SKGE
 	tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
 	depends on PCI && EXPERIMENTAL
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d9..67b280af425e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS190) += sis190.o
 obj-$(CONFIG_SIS900) += sis900.o
 obj-$(CONFIG_YELLOWFIN) += yellowfin.o
 obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
index 000000000000..fd303e7408ac
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1359 @@
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.
9
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
16
17 See the file COPYING in this distribution for more information.
18
19 */
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/pci.h>
27#include <linux/mii.h>
28#include <linux/delay.h>
29#include <linux/crc32.h>
30#include <linux/dma-mapping.h>
31#include <asm/irq.h>
32
33#define net_drv(p, arg...) if (netif_msg_drv(p)) \
34 printk(arg)
35#define net_probe(p, arg...) if (netif_msg_probe(p)) \
36 printk(arg)
37#define net_link(p, arg...) if (netif_msg_link(p)) \
38 printk(arg)
39#define net_intr(p, arg...) if (netif_msg_intr(p)) \
40 printk(arg)
41#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
42 printk(arg)
43
44#ifdef CONFIG_SIS190_NAPI
45#define NAPI_SUFFIX "-NAPI"
46#else
47#define NAPI_SUFFIX ""
48#endif
49
50#define DRV_VERSION "1.2" NAPI_SUFFIX
51#define DRV_NAME "sis190"
52#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53#define PFX DRV_NAME ": "
54
55#ifdef CONFIG_SIS190_NAPI
56#define sis190_rx_skb netif_receive_skb
57#define sis190_rx_quota(count, quota) min(count, quota)
58#else
59#define sis190_rx_skb netif_rx
60#define sis190_rx_quota(count, quota) count
61#endif
62
63#define MAC_ADDR_LEN 6
64
65#define NUM_TX_DESC 64
66#define NUM_RX_DESC 64
67#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
68#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
69#define RX_BUF_SIZE 1536
70
71#define SIS190_REGS_SIZE 0x80
72#define SIS190_TX_TIMEOUT (6*HZ)
73#define SIS190_PHY_TIMEOUT (10*HZ)
74#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
75 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
76 NETIF_MSG_IFDOWN)
77
78/* Enhanced PHY access register bit definitions */
79#define EhnMIIread 0x0000
80#define EhnMIIwrite 0x0020
81#define EhnMIIdataShift 16
82#define EhnMIIpmdShift 6 /* 7016 only */
83#define EhnMIIregShift 11
84#define EhnMIIreq 0x0010
85#define EhnMIInotDone 0x0010
86
87/* Write/read MMIO register */
88#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
89#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
90#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
91#define SIS_R8(reg) readb (ioaddr + (reg))
92#define SIS_R16(reg) readw (ioaddr + (reg))
93#define SIS_R32(reg) readl (ioaddr + (reg))
94
95#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
96
97enum sis190_registers {
98 TxControl = 0x00,
99 TxDescStartAddr = 0x04,
100 TxNextDescAddr = 0x0c, // unused
101 RxControl = 0x10,
102 RxDescStartAddr = 0x14,
103 RxNextDescAddr = 0x1c, // unused
104 IntrStatus = 0x20,
105 IntrMask = 0x24,
106 IntrControl = 0x28,
107 IntrTimer = 0x2c, // unused
108 PMControl = 0x30, // unused
109 ROMControl = 0x38,
110 ROMInterface = 0x3c,
111 StationControl = 0x40,
112 GMIIControl = 0x44,
113 TxMacControl = 0x50,
114 RxMacControl = 0x60,
115 RxMacAddr = 0x62,
116 RxHashTable = 0x68,
117 // Undocumented = 0x6c,
118 RxWakeOnLan = 0x70,
119 // Undocumented = 0x74,
120 RxMPSControl = 0x78, // unused
121};
122
123enum sis190_register_content {
124 /* IntrStatus */
125 SoftInt = 0x40000000, // unused
126 Timeup = 0x20000000, // unused
127 PauseFrame = 0x00080000, // unused
128 MagicPacket = 0x00040000, // unused
129 WakeupFrame = 0x00020000, // unused
130 LinkChange = 0x00010000,
131 RxQEmpty = 0x00000080,
132 RxQInt = 0x00000040,
133 TxQ1Empty = 0x00000020, // unused
134 TxQ1Int = 0x00000010,
135 TxQ0Empty = 0x00000008, // unused
136 TxQ0Int = 0x00000004,
137 RxHalt = 0x00000002,
138 TxHalt = 0x00000001,
139
140 /* RxStatusDesc */
141 RxRES = 0x00200000, // unused
142 RxCRC = 0x00080000,
143 RxRUNT = 0x00100000, // unused
144 RxRWT = 0x00400000, // unused
145
146 /* {Rx/Tx}CmdBits */
147 CmdReset = 0x10,
148 CmdRxEnb = 0x08, // unused
149 CmdTxEnb = 0x01,
150 RxBufEmpty = 0x01, // unused
151
152 /* Cfg9346Bits */
153 Cfg9346_Lock = 0x00, // unused
154 Cfg9346_Unlock = 0xc0, // unused
155
156 /* RxMacControl */
157 AcceptErr = 0x20, // unused
158 AcceptRunt = 0x10, // unused
159 AcceptBroadcast = 0x0800,
160 AcceptMulticast = 0x0400,
161 AcceptMyPhys = 0x0200,
162 AcceptAllPhys = 0x0100,
163
164 /* RxConfigBits */
165 RxCfgFIFOShift = 13,
166 RxCfgDMAShift = 8, // 0x1a in RxControl ?
167
168 /* TxConfigBits */
169 TxInterFrameGapShift = 24,
170 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
171
172 /* StationControl */
173 _1000bpsF = 0x1c00,
174 _1000bpsH = 0x0c00,
175 _100bpsF = 0x1800,
176 _100bpsH = 0x0800,
177 _10bpsF = 0x1400,
178 _10bpsH = 0x0400,
179
180 LinkStatus = 0x02, // unused
181 FullDup = 0x01, // unused
182
183 /* TBICSRBit */
184 TBILinkOK = 0x02000000, // unused
185};
186
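/*
 * Tx and Rx descriptors share the same 16 byte, little-endian layout.
 * The driver fills @addr and @size, issues a wmb(), then hands the
 * descriptor to the chip by setting OWNbit in @status (INTbit requests
 * an interrupt on completion). RingEnd in @size marks the last entry
 * of a ring. See sis190_give_to_asic() and sis190_start_xmit().
 */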
187struct TxDesc {
188 u32 PSize;
189 u32 status;
190 u32 addr;
191 u32 size;
192};
193
194struct RxDesc {
195 u32 PSize;
196 u32 status;
197 u32 addr;
198 u32 size;
199};
200
201enum _DescStatusBit {
202 /* _Desc.status */
203 OWNbit = 0x80000000,
204 INTbit = 0x40000000,
205 DEFbit = 0x00200000,
206 CRCbit = 0x00020000,
207 PADbit = 0x00010000,
208 /* _Desc.size */
209 RingEnd = (1 << 31),
210 /* _Desc.PSize */
211 RxSizeMask = 0x0000ffff
212};
213
214struct sis190_private {
215 void __iomem *mmio_addr;
216 struct pci_dev *pci_dev;
217 struct net_device_stats stats;
218 spinlock_t lock;
219 u32 rx_buf_sz;
220 u32 cur_rx;
221 u32 cur_tx;
222 u32 dirty_rx;
223 u32 dirty_tx;
224 dma_addr_t rx_dma;
225 dma_addr_t tx_dma;
226 struct RxDesc *RxDescRing;
227 struct TxDesc *TxDescRing;
228 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
229 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
230 struct work_struct phy_task;
231 struct timer_list timer;
232 u32 msg_enable;
233};
234
235static const struct {
236 const char *name;
237 u8 version; /* depend on docs */
238 u32 RxConfigMask; /* clear the bits supported by this chip */
239} sis_chip_info[] = {
240 { DRV_NAME, 0x00, 0xff7e1880, },
241};
242
243static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
244 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
245 { 0, },
246};
247
248MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
249
250static int rx_copybreak = 200;
251
252static struct {
253 u32 msg_enable;
254} debug = { -1 };
255
256MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
257module_param(rx_copybreak, int, 0);
258MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
259module_param_named(debug, debug.msg_enable, int, 0);
260MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
261MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
262MODULE_VERSION(DRV_VERSION);
263MODULE_LICENSE("GPL");
264
265static const u32 sis190_intr_mask =
266 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
267
268/*
269 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
270 * The chips use a 64 element hash table based on the Ethernet CRC.
271 */
272static int multicast_filter_limit = 32;
273
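/*
 * PHY access goes through the GMIIControl register: the request is
 * encoded as EhnMIIreq plus a direction bit, with the register number,
 * the PHY address (pmd, hard-coded to 1 here) and the data packed into
 * the upper bits. __mdio_cmd() polls EhnMIInotDone until the chip has
 * completed the transaction; read data comes back in bits 31:16.
 */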
274static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
275{
276 unsigned int i;
277
278 SIS_W32(GMIIControl, ctl);
279
280 msleep(1);
281
282 for (i = 0; i < 100; i++) {
283 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
284 break;
285 msleep(1);
286 }
287
288	if (i > 99)
289 printk(KERN_ERR PFX "PHY command failed !\n");
290}
291
292static void mdio_write(void __iomem *ioaddr, int reg, int val)
293{
294 u32 pmd = 1;
295
296 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
297 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
298 (((u32) val) << EhnMIIdataShift));
299}
300
301static int mdio_read(void __iomem *ioaddr, int reg)
302{
303 u32 pmd = 1;
304
305 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
306 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
307
308 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
309}
310
311static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
312{
313 unsigned int i;
314 u16 data;
315 u32 val;
316
317 if (!(SIS_R32(ROMControl) & 0x0002))
318 return 0;
319
320 val = (0x0080 | (0x2 << 8) | (reg << 10));
321
322 SIS_W32(ROMInterface, val);
323
324 for (i = 0; i < 200; i++) {
325 if (!(SIS_R32(ROMInterface) & 0x0080))
326 break;
327 msleep(1);
328 }
329
330 data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);
331
332 return data;
333}
334
335static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
336{
337 SIS_W32(IntrMask, 0x00);
338 SIS_W32(IntrStatus, 0xffffffff);
339 SIS_PCI_COMMIT();
340}
341
342static void sis190_asic_down(void __iomem *ioaddr)
343{
344 /* Stop the chip's Tx and Rx DMA processes. */
345
346 SIS_W32(TxControl, 0x1a00);
347 SIS_W32(RxControl, 0x1a00);
348
349 sis190_irq_mask_and_ack(ioaddr);
350}
351
352static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
353{
354 desc->size |= cpu_to_le32(RingEnd);
355}
356
357static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
358{
359 u32 eor = le32_to_cpu(desc->size) & RingEnd;
360
361 desc->PSize = 0x0;
362 desc->size = cpu_to_le32(rx_buf_sz | eor);
363 wmb();
364 desc->status = cpu_to_le32(OWNbit | INTbit);
365}
366
367static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
368 u32 rx_buf_sz)
369{
370 desc->addr = cpu_to_le32(mapping);
371 sis190_give_to_asic(desc, rx_buf_sz);
372}
373
374static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
375{
376 desc->PSize = 0x0;
377 desc->addr = 0xdeadbeef;
378 desc->size &= cpu_to_le32(RingEnd);
379 wmb();
380 desc->status = 0x0;
381}
382
383static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
384 struct RxDesc *desc, u32 rx_buf_sz)
385{
386 struct sk_buff *skb;
387 dma_addr_t mapping;
388 int ret = 0;
389
390 skb = dev_alloc_skb(rx_buf_sz);
391 if (!skb)
392 goto err_out;
393
394 *sk_buff = skb;
395
396 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
397 PCI_DMA_FROMDEVICE);
398
399 sis190_map_to_asic(desc, mapping, rx_buf_sz);
400out:
401 return ret;
402
403err_out:
404 ret = -ENOMEM;
405 sis190_make_unusable_by_asic(desc);
406 goto out;
407}
408
409static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
410 u32 start, u32 end)
411{
412 u32 cur;
413
414 for (cur = start; cur < end; cur++) {
415 int ret, i = cur % NUM_RX_DESC;
416
417 if (tp->Rx_skbuff[i])
418 continue;
419
420 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
421 tp->RxDescRing + i, tp->rx_buf_sz);
422 if (ret < 0)
423 break;
424 }
425 return cur - start;
426}
427
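/*
 * Copybreak: frames smaller than rx_copybreak are copied into a freshly
 * allocated (and IP-aligned) skb so that the original receive buffer can
 * be handed straight back to the chip. Returns 0 when the copy was done,
 * -1 when the caller should unmap and pass the original skb up instead.
 */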
428static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
429 struct RxDesc *desc, int rx_buf_sz)
430{
431 int ret = -1;
432
433 if (pkt_size < rx_copybreak) {
434 struct sk_buff *skb;
435
436 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
437 if (skb) {
438 skb_reserve(skb, NET_IP_ALIGN);
439 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
440 *sk_buff = skb;
441 sis190_give_to_asic(desc, rx_buf_sz);
442 ret = 0;
443 }
444 }
445 return ret;
446}
447
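/*
 * Rx handler: walk the ring from cur_rx for every descriptor the chip
 * has released (OWNbit clear), drop frames flagged with CRC or padding
 * errors, hand good frames to the stack, then refill the ring between
 * dirty_rx and cur_rx with fresh buffers.
 */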
448static int sis190_rx_interrupt(struct net_device *dev,
449 struct sis190_private *tp, void __iomem *ioaddr)
450{
451 struct net_device_stats *stats = &tp->stats;
452 u32 rx_left, cur_rx = tp->cur_rx;
453 u32 delta, count;
454
455 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
456 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
457
458 for (; rx_left > 0; rx_left--, cur_rx++) {
459 unsigned int entry = cur_rx % NUM_RX_DESC;
460 struct RxDesc *desc = tp->RxDescRing + entry;
461 u32 status;
462
463 if (desc->status & OWNbit)
464 break;
465
466 status = le32_to_cpu(desc->PSize);
467
468 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
469 // status);
470
471 if (status & RxCRC) {
472 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
473 dev->name, status);
474 stats->rx_errors++;
475 stats->rx_crc_errors++;
476 sis190_give_to_asic(desc, tp->rx_buf_sz);
477 } else if (!(status & PADbit)) {
478 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
479 dev->name, status);
480 stats->rx_errors++;
481 stats->rx_length_errors++;
482 sis190_give_to_asic(desc, tp->rx_buf_sz);
483 } else {
484 struct sk_buff *skb = tp->Rx_skbuff[entry];
485 int pkt_size = (status & RxSizeMask) - 4;
486 void (*pci_action)(struct pci_dev *, dma_addr_t,
487 size_t, int) = pci_dma_sync_single_for_device;
488
489 if (unlikely(pkt_size > tp->rx_buf_sz)) {
490 net_intr(tp, KERN_INFO
491 "%s: (frag) status = %08x.\n",
492 dev->name, status);
493 stats->rx_dropped++;
494 stats->rx_length_errors++;
495 sis190_give_to_asic(desc, tp->rx_buf_sz);
496 continue;
497 }
498
499 pci_dma_sync_single_for_cpu(tp->pci_dev,
500 le32_to_cpu(desc->addr), tp->rx_buf_sz,
501 PCI_DMA_FROMDEVICE);
502
503 if (sis190_try_rx_copy(&skb, pkt_size, desc,
504 tp->rx_buf_sz)) {
505 pci_action = pci_unmap_single;
506 tp->Rx_skbuff[entry] = NULL;
507 sis190_make_unusable_by_asic(desc);
508 }
509
510 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
511 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
512
513 skb->dev = dev;
514 skb_put(skb, pkt_size);
515 skb->protocol = eth_type_trans(skb, dev);
516
517 sis190_rx_skb(skb);
518
519 dev->last_rx = jiffies;
520 stats->rx_bytes += pkt_size;
521 stats->rx_packets++;
522 }
523 }
524 count = cur_rx - tp->cur_rx;
525 tp->cur_rx = cur_rx;
526
527 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
528 if (!delta && count && netif_msg_intr(tp))
529 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
530 tp->dirty_rx += delta;
531
532 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
533 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
534
535 return count;
536}
537
538static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
539 struct TxDesc *desc)
540{
541 unsigned int len;
542
543 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
544
545 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
546
547 memset(desc, 0x00, sizeof(*desc));
548}
549
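/*
 * Reclaim completed Tx descriptors: everything between dirty_tx and
 * cur_tx whose OWNbit the chip has cleared is unmapped and freed, and
 * the queue is woken again if it had been stopped on a full ring.
 */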
550static void sis190_tx_interrupt(struct net_device *dev,
551 struct sis190_private *tp, void __iomem *ioaddr)
552{
553 u32 pending, dirty_tx = tp->dirty_tx;
554 /*
555 * It would not be needed if queueing was allowed to be enabled
556 * again too early (hint: think preempt and unclocked smp systems).
557 */
558 unsigned int queue_stopped;
559
560 smp_rmb();
561 pending = tp->cur_tx - dirty_tx;
562 queue_stopped = (pending == NUM_TX_DESC);
563
564 for (; pending; pending--, dirty_tx++) {
565 unsigned int entry = dirty_tx % NUM_TX_DESC;
566 struct TxDesc *txd = tp->TxDescRing + entry;
567 struct sk_buff *skb;
568
569 if (le32_to_cpu(txd->status) & OWNbit)
570 break;
571
572 skb = tp->Tx_skbuff[entry];
573
574 tp->stats.tx_packets++;
575 tp->stats.tx_bytes += skb->len;
576
577 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
578 tp->Tx_skbuff[entry] = NULL;
579 dev_kfree_skb_irq(skb);
580 }
581
582 if (tp->dirty_tx != dirty_tx) {
583 tp->dirty_tx = dirty_tx;
584 smp_wmb();
585 if (queue_stopped)
586 netif_wake_queue(dev);
587 }
588}
589
590/*
591 * The interrupt handler does all of the Rx thread work and cleans up after
592 * the Tx thread.
593 */
594static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
595{
596 struct net_device *dev = __dev;
597 struct sis190_private *tp = netdev_priv(dev);
598 void __iomem *ioaddr = tp->mmio_addr;
599 unsigned int handled = 0;
600 u32 status;
601
602 status = SIS_R32(IntrStatus);
603
604 if ((status == 0xffffffff) || !status)
605 goto out;
606
607 handled = 1;
608
609 if (unlikely(!netif_running(dev))) {
610 sis190_asic_down(ioaddr);
611 goto out;
612 }
613
614 SIS_W32(IntrStatus, status);
615
616 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
617
618 if (status & LinkChange) {
619 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
620 schedule_work(&tp->phy_task);
621 }
622
623 if (status & RxQInt)
624 sis190_rx_interrupt(dev, tp, ioaddr);
625
626 if (status & TxQ0Int)
627 sis190_tx_interrupt(dev, tp, ioaddr);
628out:
629 return IRQ_RETVAL(handled);
630}
631
632static void sis190_free_rx_skb(struct sis190_private *tp,
633 struct sk_buff **sk_buff, struct RxDesc *desc)
634{
635 struct pci_dev *pdev = tp->pci_dev;
636
637 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
638 PCI_DMA_FROMDEVICE);
639 dev_kfree_skb(*sk_buff);
640 *sk_buff = NULL;
641 sis190_make_unusable_by_asic(desc);
642}
643
644static void sis190_rx_clear(struct sis190_private *tp)
645{
646 unsigned int i;
647
648 for (i = 0; i < NUM_RX_DESC; i++) {
649 if (!tp->Rx_skbuff[i])
650 continue;
651 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
652 }
653}
654
655static void sis190_init_ring_indexes(struct sis190_private *tp)
656{
657 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
658}
659
660static int sis190_init_ring(struct net_device *dev)
661{
662 struct sis190_private *tp = netdev_priv(dev);
663
664 sis190_init_ring_indexes(tp);
665
666 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
667 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
668
669 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
670 goto err_rx_clear;
671
672 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
673
674 return 0;
675
676err_rx_clear:
677 sis190_rx_clear(tp);
678 return -ENOMEM;
679}
680
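/*
 * Rx filter setup: promiscuous and all-multicast modes open the 64 bit
 * hash table completely, otherwise each multicast address is hashed by
 * taking the top six bits of its Ethernet CRC and setting the matching
 * bit in RxHashTable. AcceptBroadcast and AcceptMyPhys are always kept.
 */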
681static void sis190_set_rx_mode(struct net_device *dev)
682{
683 struct sis190_private *tp = netdev_priv(dev);
684 void __iomem *ioaddr = tp->mmio_addr;
685 unsigned long flags;
686 u32 mc_filter[2]; /* Multicast hash filter */
687 u16 rx_mode;
688
689 if (dev->flags & IFF_PROMISC) {
690 /* Unconditionally log net taps. */
691 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
692 dev->name);
693 rx_mode =
694 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
695 AcceptAllPhys;
696 mc_filter[1] = mc_filter[0] = 0xffffffff;
697 } else if ((dev->mc_count > multicast_filter_limit) ||
698 (dev->flags & IFF_ALLMULTI)) {
699 /* Too many to filter perfectly -- accept all multicasts. */
700 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
701 mc_filter[1] = mc_filter[0] = 0xffffffff;
702 } else {
703 struct dev_mc_list *mclist;
704 unsigned int i;
705
706 rx_mode = AcceptBroadcast | AcceptMyPhys;
707 mc_filter[1] = mc_filter[0] = 0;
708 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
709 i++, mclist = mclist->next) {
710 int bit_nr =
711 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
712 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
713 rx_mode |= AcceptMulticast;
714 }
715 }
716
717 spin_lock_irqsave(&tp->lock, flags);
718
719 SIS_W16(RxMacControl, rx_mode | 0x2);
720 SIS_W32(RxHashTable, mc_filter[0]);
721 SIS_W32(RxHashTable + 4, mc_filter[1]);
722
723 spin_unlock_irqrestore(&tp->lock, flags);
724}
725
726static void sis190_soft_reset(void __iomem *ioaddr)
727{
728 SIS_W32(IntrControl, 0x8000);
729 SIS_PCI_COMMIT();
730 msleep(1);
731 SIS_W32(IntrControl, 0x0);
732 sis190_asic_down(ioaddr);
733 msleep(1);
734}
735
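/*
 * Bring the chip up: soft reset, program the descriptor ring base
 * addresses, set conservative MAC/station defaults, load the current
 * Rx filter, then unmask interrupts and enable the Tx and Rx DMA
 * engines before waking the transmit queue.
 */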
736static void sis190_hw_start(struct net_device *dev)
737{
738 struct sis190_private *tp = netdev_priv(dev);
739 void __iomem *ioaddr = tp->mmio_addr;
740
741 sis190_soft_reset(ioaddr);
742
743 SIS_W32(TxDescStartAddr, tp->tx_dma);
744 SIS_W32(RxDescStartAddr, tp->rx_dma);
745
746 SIS_W32(IntrStatus, 0xffffffff);
747 SIS_W32(IntrMask, 0x0);
748 /*
749 * Default is 100Mbps.
750 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
751 */
752 SIS_W16(StationControl, 0x1901);
753 SIS_W32(GMIIControl, 0x0);
754 SIS_W32(TxMacControl, 0x60);
755 SIS_W16(RxMacControl, 0x02);
756 SIS_W32(RxHashTable, 0x0);
757 SIS_W32(0x6c, 0x0);
758 SIS_W32(RxWakeOnLan, 0x0);
759 SIS_W32(0x74, 0x0);
760
761 SIS_PCI_COMMIT();
762
763 sis190_set_rx_mode(dev);
764
765 /* Enable all known interrupts by setting the interrupt mask. */
766 SIS_W32(IntrMask, sis190_intr_mask);
767
768 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
769 SIS_W32(RxControl, 0x1a1d);
770
771 netif_start_queue(dev);
772}
773
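/*
 * PHY worker: reschedule while BMCR_RESET is still pending, kick the
 * PHY again as long as autonegotiation has not completed, and once a
 * link partner is seen program StationControl from the LPA bits.
 */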
774static void sis190_phy_task(void * data)
775{
776 struct net_device *dev = data;
777 struct sis190_private *tp = netdev_priv(dev);
778 void __iomem *ioaddr = tp->mmio_addr;
779 u16 val;
780
781 val = mdio_read(ioaddr, MII_BMCR);
782 if (val & BMCR_RESET) {
783 // FIXME: needlessly high ? -- FR 02/07/2005
784 mod_timer(&tp->timer, jiffies + HZ/10);
785 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
786 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
787 dev->name);
788 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
789 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
790 } else {
791 /* Rejoice ! */
792 struct {
793 int val;
794 const char *msg;
795 u16 ctl;
796 } reg31[] = {
797 { LPA_1000XFULL | LPA_SLCT,
798 "1000 Mbps Full Duplex",
799 0x01 | _1000bpsF },
800 { LPA_1000XHALF | LPA_SLCT,
801 "1000 Mbps Half Duplex",
802 0x01 | _1000bpsH },
803 { LPA_100FULL,
804 "100 Mbps Full Duplex",
805 0x01 | _100bpsF },
806 { LPA_100HALF,
807 "100 Mbps Half Duplex",
808 0x01 | _100bpsH },
809 { LPA_10FULL,
810 "10 Mbps Full Duplex",
811 0x01 | _10bpsF },
812 { LPA_10HALF,
813 "10 Mbps Half Duplex",
814 0x01 | _10bpsH },
815 { 0, "unknown", 0x0000 }
816 }, *p;
817
818 val = mdio_read(ioaddr, 0x1f);
819 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
820
821 val = mdio_read(ioaddr, MII_LPA);
822 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
823
824 for (p = reg31; p->ctl; p++) {
825 if ((val & p->val) == p->val)
826 break;
827 }
828 if (p->ctl)
829 SIS_W16(StationControl, p->ctl);
830 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
831 p->msg);
832 netif_carrier_on(dev);
833 }
834}
835
836static void sis190_phy_timer(unsigned long __opaque)
837{
838 struct net_device *dev = (struct net_device *)__opaque;
839 struct sis190_private *tp = netdev_priv(dev);
840
841 if (likely(netif_running(dev)))
842 schedule_work(&tp->phy_task);
843}
844
845static inline void sis190_delete_timer(struct net_device *dev)
846{
847 struct sis190_private *tp = netdev_priv(dev);
848
849 del_timer_sync(&tp->timer);
850}
851
852static inline void sis190_request_timer(struct net_device *dev)
853{
854 struct sis190_private *tp = netdev_priv(dev);
855 struct timer_list *timer = &tp->timer;
856
857 init_timer(timer);
858 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
859 timer->data = (unsigned long)dev;
860 timer->function = sis190_phy_timer;
861 add_timer(timer);
862}
863
864static void sis190_set_rxbufsize(struct sis190_private *tp,
865 struct net_device *dev)
866{
867 unsigned int mtu = dev->mtu;
868
869 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
870}
871
872static int sis190_open(struct net_device *dev)
873{
874 struct sis190_private *tp = netdev_priv(dev);
875 struct pci_dev *pdev = tp->pci_dev;
876 int rc = -ENOMEM;
877
878 sis190_set_rxbufsize(tp, dev);
879
880 /*
881 * Rx and Tx descriptors need 256 bytes alignment.
882 * pci_alloc_consistent() guarantees a stronger alignment.
883 */
884 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
885 if (!tp->TxDescRing)
886 goto out;
887
888 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
889 if (!tp->RxDescRing)
890 goto err_free_tx_0;
891
892 rc = sis190_init_ring(dev);
893 if (rc < 0)
894 goto err_free_rx_1;
895
896 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
897
898 sis190_request_timer(dev);
899
900 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
901 if (rc < 0)
902 goto err_release_timer_2;
903
904 sis190_hw_start(dev);
905out:
906 return rc;
907
908err_release_timer_2:
909 sis190_delete_timer(dev);
910 sis190_rx_clear(tp);
911err_free_rx_1:
912 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
913 tp->rx_dma);
914err_free_tx_0:
915 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
916 tp->tx_dma);
917 goto out;
918}
919
920static void sis190_tx_clear(struct sis190_private *tp)
921{
922 unsigned int i;
923
924 for (i = 0; i < NUM_TX_DESC; i++) {
925 struct sk_buff *skb = tp->Tx_skbuff[i];
926
927 if (!skb)
928 continue;
929
930 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
931 tp->Tx_skbuff[i] = NULL;
932 dev_kfree_skb(skb);
933
934 tp->stats.tx_dropped++;
935 }
936 tp->cur_tx = tp->dirty_tx = 0;
937}
938
939static void sis190_down(struct net_device *dev)
940{
941 struct sis190_private *tp = netdev_priv(dev);
942 void __iomem *ioaddr = tp->mmio_addr;
943 unsigned int poll_locked = 0;
944
945 sis190_delete_timer(dev);
946
947 netif_stop_queue(dev);
948
949 flush_scheduled_work();
950
951 do {
952 spin_lock_irq(&tp->lock);
953
954 sis190_asic_down(ioaddr);
955
956 spin_unlock_irq(&tp->lock);
957
958 synchronize_irq(dev->irq);
959
960 if (!poll_locked) {
961 netif_poll_disable(dev);
962 poll_locked++;
963 }
964
965 synchronize_sched();
966
967 } while (SIS_R32(IntrMask));
968
969 sis190_tx_clear(tp);
970 sis190_rx_clear(tp);
971}
972
973static int sis190_close(struct net_device *dev)
974{
975 struct sis190_private *tp = netdev_priv(dev);
976 struct pci_dev *pdev = tp->pci_dev;
977
978 sis190_down(dev);
979
980 free_irq(dev->irq, dev);
981
982 netif_poll_enable(dev);
983
984 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
985 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
986
987 tp->TxDescRing = NULL;
988 tp->RxDescRing = NULL;
989
990 return 0;
991}
992
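/*
 * Transmit path: short frames are padded to ETH_ZLEN, the buffer is
 * DMA mapped and the descriptor filled before OWNbit is set (wmb()
 * orders the writes), then the Tx engine is kicked. When the ring
 * fills up the queue is stopped, with a re-check of dirty_tx to avoid
 * racing against the completion handler.
 */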
993static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
994{
995 struct sis190_private *tp = netdev_priv(dev);
996 void __iomem *ioaddr = tp->mmio_addr;
997 u32 len, entry, dirty_tx;
998 struct TxDesc *desc;
999 dma_addr_t mapping;
1000
1001 if (unlikely(skb->len < ETH_ZLEN)) {
1002 skb = skb_padto(skb, ETH_ZLEN);
1003 if (!skb) {
1004 tp->stats.tx_dropped++;
1005 goto out;
1006 }
1007 len = ETH_ZLEN;
1008 } else {
1009 len = skb->len;
1010 }
1011
1012 entry = tp->cur_tx % NUM_TX_DESC;
1013 desc = tp->TxDescRing + entry;
1014
1015 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1016 netif_stop_queue(dev);
1017 net_tx_err(tp, KERN_ERR PFX
1018 "%s: BUG! Tx Ring full when queue awake!\n",
1019 dev->name);
1020 return NETDEV_TX_BUSY;
1021 }
1022
1023 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1024
1025 tp->Tx_skbuff[entry] = skb;
1026
1027 desc->PSize = cpu_to_le32(len);
1028 desc->addr = cpu_to_le32(mapping);
1029
1030 desc->size = cpu_to_le32(len);
1031 if (entry == (NUM_TX_DESC - 1))
1032 desc->size |= cpu_to_le32(RingEnd);
1033
1034 wmb();
1035
1036 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1037
1038 tp->cur_tx++;
1039
1040 smp_wmb();
1041
1042 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1043
1044 dev->trans_start = jiffies;
1045
1046 dirty_tx = tp->dirty_tx;
1047 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1048 netif_stop_queue(dev);
1049 smp_rmb();
1050 if (dirty_tx != tp->dirty_tx)
1051 netif_wake_queue(dev);
1052 }
1053out:
1054 return NETDEV_TX_OK;
1055}
1056
1057static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1058{
1059 struct sis190_private *tp = netdev_priv(dev);
1060
1061 return &tp->stats;
1062}
1063
1064static void sis190_release_board(struct pci_dev *pdev)
1065{
1066 struct net_device *dev = pci_get_drvdata(pdev);
1067 struct sis190_private *tp = netdev_priv(dev);
1068
1069 iounmap(tp->mmio_addr);
1070 pci_release_regions(pdev);
1071 pci_disable_device(pdev);
1072 free_netdev(dev);
1073}
1074
1075static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1076{
1077 struct sis190_private *tp;
1078 struct net_device *dev;
1079 void __iomem *ioaddr;
1080 int rc;
1081
1082 dev = alloc_etherdev(sizeof(*tp));
1083 if (!dev) {
1084 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1085 rc = -ENOMEM;
1086 goto err_out_0;
1087 }
1088
1089 SET_MODULE_OWNER(dev);
1090 SET_NETDEV_DEV(dev, &pdev->dev);
1091
1092 tp = netdev_priv(dev);
1093 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1094
1095 rc = pci_enable_device(pdev);
1096 if (rc < 0) {
1097 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1098 goto err_free_dev_1;
1099 }
1100
1101 rc = -ENODEV;
1102
1103 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1104 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1105 pci_name(pdev));
1106 goto err_pci_disable_2;
1107 }
1108 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1109 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1110 pci_name(pdev));
1111 goto err_pci_disable_2;
1112 }
1113
1114 rc = pci_request_regions(pdev, DRV_NAME);
1115 if (rc < 0) {
1116 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1117 pci_name(pdev));
1118 goto err_pci_disable_2;
1119 }
1120
1121 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1122 if (rc < 0) {
1123 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1124 pci_name(pdev));
1125 goto err_free_res_3;
1126 }
1127
1128 pci_set_master(pdev);
1129
1130 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1131 if (!ioaddr) {
1132 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1133 pci_name(pdev));
1134 rc = -EIO;
1135 goto err_free_res_3;
1136 }
1137
1138 tp->pci_dev = pdev;
1139 tp->mmio_addr = ioaddr;
1140
1141 sis190_irq_mask_and_ack(ioaddr);
1142
1143 sis190_soft_reset(ioaddr);
1144out:
1145 return dev;
1146
1147err_free_res_3:
1148 pci_release_regions(pdev);
1149err_pci_disable_2:
1150 pci_disable_device(pdev);
1151err_free_dev_1:
1152 free_netdev(dev);
1153err_out_0:
1154 dev = ERR_PTR(rc);
1155 goto out;
1156}
1157
1158static void sis190_tx_timeout(struct net_device *dev)
1159{
1160 struct sis190_private *tp = netdev_priv(dev);
1161 void __iomem *ioaddr = tp->mmio_addr;
1162 u8 tmp8;
1163
1164 /* Disable Tx, if not already */
1165 tmp8 = SIS_R8(TxControl);
1166 if (tmp8 & CmdTxEnb)
1167 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1168
1169 /* Disable interrupts by clearing the interrupt mask. */
1170 SIS_W32(IntrMask, 0x0000);
1171
1172 /* Stop a shared interrupt from scavenging while we are. */
1173 spin_lock_irq(&tp->lock);
1174 sis190_tx_clear(tp);
1175 spin_unlock_irq(&tp->lock);
1176
1177 /* ...and finally, reset everything. */
1178 sis190_hw_start(dev);
1179
1180 netif_wake_queue(dev);
1181}
1182
1183static void sis190_set_speed_auto(struct net_device *dev)
1184{
1185 struct sis190_private *tp = netdev_priv(dev);
1186 void __iomem *ioaddr = tp->mmio_addr;
1187 int val;
1188
1189 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1190
1191 val = mdio_read(ioaddr, MII_ADVERTISE);
1192
1193 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1194 // unchanged.
1195 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1196 ADVERTISE_100FULL | ADVERTISE_10FULL |
1197 ADVERTISE_100HALF | ADVERTISE_10HALF);
1198
1199 // Enable 1000 Full Mode.
1200 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1201
1202 // Enable auto-negotiation and restart auto-negotiation.
1203 mdio_write(ioaddr, MII_BMCR,
1204 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1205}
1206
1207static void sis190_get_drvinfo(struct net_device *dev,
1208 struct ethtool_drvinfo *info)
1209{
1210 struct sis190_private *tp = netdev_priv(dev);
1211
1212 strcpy(info->driver, DRV_NAME);
1213 strcpy(info->version, DRV_VERSION);
1214 strcpy(info->bus_info, pci_name(tp->pci_dev));
1215}
1216
1217static int sis190_get_regs_len(struct net_device *dev)
1218{
1219 return SIS190_REGS_SIZE;
1220}
1221
1222static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1223 void *p)
1224{
1225 struct sis190_private *tp = netdev_priv(dev);
1226 unsigned long flags;
1227
1228 if (regs->len > SIS190_REGS_SIZE)
1229 regs->len = SIS190_REGS_SIZE;
1230
1231 spin_lock_irqsave(&tp->lock, flags);
1232 memcpy_fromio(p, tp->mmio_addr, regs->len);
1233 spin_unlock_irqrestore(&tp->lock, flags);
1234}
1235
1236static u32 sis190_get_msglevel(struct net_device *dev)
1237{
1238 struct sis190_private *tp = netdev_priv(dev);
1239
1240 return tp->msg_enable;
1241}
1242
1243static void sis190_set_msglevel(struct net_device *dev, u32 value)
1244{
1245 struct sis190_private *tp = netdev_priv(dev);
1246
1247 tp->msg_enable = value;
1248}
1249
1250static struct ethtool_ops sis190_ethtool_ops = {
1251 .get_drvinfo = sis190_get_drvinfo,
1252 .get_regs_len = sis190_get_regs_len,
1253 .get_regs = sis190_get_regs,
1254 .get_link = ethtool_op_get_link,
1255 .get_msglevel = sis190_get_msglevel,
1256 .set_msglevel = sis190_set_msglevel,
1257};
1258
1259static int __devinit sis190_init_one(struct pci_dev *pdev,
1260 const struct pci_device_id *ent)
1261{
1262 static int printed_version = 0;
1263 struct sis190_private *tp;
1264 struct net_device *dev;
1265 void __iomem *ioaddr;
1266 int i, rc;
1267
1268 if (!printed_version) {
1269 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1270 printed_version = 1;
1271 }
1272
1273 dev = sis190_init_board(pdev);
1274 if (IS_ERR(dev)) {
1275 rc = PTR_ERR(dev);
1276 goto out;
1277 }
1278
1279 tp = netdev_priv(dev);
1280 ioaddr = tp->mmio_addr;
1281
1282 /* Get MAC address */
1283 /* Read node address from the EEPROM */
1284
1285 if (SIS_R32(ROMControl) & 0x4) {
1286 for (i = 0; i < 3; i++) {
1287 SIS_W16(RxMacAddr + 2*i,
1288 sis190_read_eeprom(ioaddr, 3 + i));
1289 }
1290 }
1291
1292 for (i = 0; i < MAC_ADDR_LEN; i++)
1293 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1294
1295 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1296
1297 dev->open = sis190_open;
1298 dev->stop = sis190_close;
1299 dev->get_stats = sis190_get_stats;
1300 dev->tx_timeout = sis190_tx_timeout;
1301 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1302 dev->hard_start_xmit = sis190_start_xmit;
1303 dev->set_multicast_list = sis190_set_rx_mode;
1304 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1305 dev->irq = pdev->irq;
1306 dev->base_addr = (unsigned long) 0xdead;
1307
1308 spin_lock_init(&tp->lock);
1309 rc = register_netdev(dev);
1310 if (rc < 0) {
1311 sis190_release_board(pdev);
1312 goto out;
1313 }
1314
1315 pci_set_drvdata(pdev, dev);
1316
1317 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1318 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1319 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1320 ioaddr, dev->irq,
1321 dev->dev_addr[0], dev->dev_addr[1],
1322 dev->dev_addr[2], dev->dev_addr[3],
1323 dev->dev_addr[4], dev->dev_addr[5]);
1324
1325 netif_carrier_off(dev);
1326
1327 sis190_set_speed_auto(dev);
1328out:
1329 return rc;
1330}
1331
1332static void __devexit sis190_remove_one(struct pci_dev *pdev)
1333{
1334 struct net_device *dev = pci_get_drvdata(pdev);
1335
1336 unregister_netdev(dev);
1337 sis190_release_board(pdev);
1338 pci_set_drvdata(pdev, NULL);
1339}
1340
1341static struct pci_driver sis190_pci_driver = {
1342 .name = DRV_NAME,
1343 .id_table = sis190_pci_tbl,
1344 .probe = sis190_init_one,
1345 .remove = __devexit_p(sis190_remove_one),
1346};
1347
1348static int __init sis190_init_module(void)
1349{
1350 return pci_module_init(&sis190_pci_driver);
1351}
1352
1353static void __exit sis190_cleanup_module(void)
1354{
1355 pci_unregister_driver(&sis190_pci_driver);
1356}
1357
1358module_init(sis190_init_module);
1359module_exit(sis190_cleanup_module);