path: root/drivers/net/tulip/dmfe.c
author:    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit:    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree:      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/tulip/dmfe.c
tags:      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/tulip/dmfe.c')
-rw-r--r--  drivers/net/tulip/dmfe.c  2066
1 file changed, 2066 insertions, 0 deletions
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
new file mode 100644
index 000000000000..e25f33df223e
--- /dev/null
+++ b/drivers/net/tulip/dmfe.c
@@ -0,0 +1,2066 @@
1/*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 DAVICOM Web-Site: www.davicom.com.tw
17
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
25
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
31
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
36
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
40
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
46
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
51
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
55
56 TODO
57
58 Implement pci_driver::suspend() and pci_driver::resume()
59 power management methods.
60
61 Check on 64 bit boxes.
62 Check and fix on big endian boxes.
63
64 Test and make sure PCI latency is now correct for all cases.
65*/
66
67#define DRV_NAME "dmfe"
68#define DRV_VERSION "1.36.4"
69#define DRV_RELDATE "2002-01-17"
70
71#include <linux/module.h>
72#include <linux/kernel.h>
73#include <linux/string.h>
74#include <linux/timer.h>
75#include <linux/ptrace.h>
76#include <linux/errno.h>
77#include <linux/ioport.h>
78#include <linux/slab.h>
79#include <linux/interrupt.h>
80#include <linux/pci.h>
81#include <linux/init.h>
82#include <linux/netdevice.h>
83#include <linux/etherdevice.h>
84#include <linux/ethtool.h>
85#include <linux/skbuff.h>
86#include <linux/delay.h>
87#include <linux/spinlock.h>
88#include <linux/crc32.h>
89#include <linux/bitops.h>
90
91#include <asm/processor.h>
92#include <asm/io.h>
93#include <asm/dma.h>
94#include <asm/uaccess.h>
95#include <asm/irq.h>
96
97
98/* Board/System/Debug information/definition ---------------- */
99#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
100#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
101#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
102#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
103
104#define DM9102_IO_SIZE 0x80
105#define DM9102A_IO_SIZE 0x100
106#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
107#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
108#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
109#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
110#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
111#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
112#define TX_BUF_ALLOC 0x600
113#define RX_ALLOC_SIZE 0x620
114#define DM910X_RESET 1
115#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
116#define CR6_DEFAULT 0x00080000 /* HD */
117#define CR7_DEFAULT 0x180c1
118#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
119#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
120#define MAX_PACKET_SIZE 1514
121#define DMFE_MAX_MULTICAST 14
122#define RX_COPY_SIZE 100
123#define MAX_CHECK_PACKET 0x8000
124#define DM9801_NOISE_FLOOR 8
125#define DM9802_NOISE_FLOOR 5
126
127#define DMFE_10MHF 0
128#define DMFE_100MHF 1
129#define DMFE_10MFD 4
130#define DMFE_100MFD 5
131#define DMFE_AUTO 8
132#define DMFE_1M_HPNA 0x10
133
134#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
135#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
136#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
137#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
138#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
139#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
140
141#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
142#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
143#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
144
145#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
146
147#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMbps %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
148
149
150/* CR9 definition: SROM/MII */
151#define CR9_SROM_READ 0x4800
152#define CR9_SRCS 0x1
153#define CR9_SRCLK 0x2
154#define CR9_CRDOUT 0x8
155#define SROM_DATA_0 0x0
156#define SROM_DATA_1 0x4
157#define PHY_DATA_1 0x20000
158#define PHY_DATA_0 0x00000
159#define MDCLKH 0x10000
160
161#define PHY_POWER_DOWN 0x800
162
163#define SROM_V41_CODE 0x14
164
165#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
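/* Each SROM_CLK_WRITE() presents one data bit (SROM_DATA_0/1) on CR9 and
 * pulses CR9_SRCLK low-high-low around it with 5 us settling delays, so the
 * serial EEPROM samples exactly one bit per invocation. */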
166
167#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
168#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
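/* CHK_IO_SIZE() rebuilds the same "device << 16 | vendor" layout used by the
 * PCI_DM9xxx_ID constants above: the DM9132 and newer DM9102A revisions
 * (dev_rev >= 0x02000030) decode the larger 0x100-byte I/O window, while
 * older DM9102/DM9100 parts decode only 0x80 bytes. */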
169
170/* Sten Check */
171#define DEVICE net_device
172
173/* Structure/enum declaration ------------------------------- */
174struct tx_desc {
175 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
176 char *tx_buf_ptr; /* Data for us */
177 struct tx_desc *next_tx_desc;
178} __attribute__(( aligned(32) ));
179
180struct rx_desc {
181 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
182 struct sk_buff *rx_skb_ptr; /* Data for us */
183 struct rx_desc *next_rx_desc;
184} __attribute__(( aligned(32) ));
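/* In both descriptors only the four leading u32 words (tdes0-3 / rdes0-3)
 * are seen by the DMA engine ("data for the card"); the trailing pointers
 * are host-side bookkeeping. aligned(32) pads every descriptor to a fixed
 * 32-byte slot so the hardware and driver fields stay together. */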
185
186struct dmfe_board_info {
187 u32 chip_id; /* Chip vendor/Device ID */
188 u32 chip_revision; /* Chip revision */
189 struct DEVICE *next_dev; /* next device */
190 struct pci_dev *pdev; /* PCI device */
191 spinlock_t lock;
192
193 long ioaddr; /* I/O base address */
194 u32 cr0_data;
195 u32 cr5_data;
196 u32 cr6_data;
197 u32 cr7_data;
198 u32 cr15_data;
199
200 /* pointer for memory physical address */
201 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
202 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
203 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
204 dma_addr_t first_tx_desc_dma;
205 dma_addr_t first_rx_desc_dma;
206
207 /* descriptor pointer */
208 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
209 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
210 unsigned char *desc_pool_ptr; /* descriptor pool memory */
211 struct tx_desc *first_tx_desc;
212 struct tx_desc *tx_insert_ptr;
213 struct tx_desc *tx_remove_ptr;
214 struct rx_desc *first_rx_desc;
215 struct rx_desc *rx_insert_ptr;
216 struct rx_desc *rx_ready_ptr; /* packet come pointer */
217 unsigned long tx_packet_cnt; /* transmitted packet count */
218 unsigned long tx_queue_cnt; /* wait to send packet count */
219 unsigned long rx_avail_cnt; /* available rx descriptor count */
220 unsigned long interval_rx_cnt; /* rx packet count a callback time */
221
222 u16 HPNA_command; /* For HPNA register 16 */
223 u16 HPNA_timer; /* For HPNA remote device check */
224 u16 dbug_cnt;
225 u16 NIC_capability; /* NIC media capability */
226 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
227
228 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
229 u8 chip_type; /* Keep DM9102A chip type */
230 u8 media_mode; /* user specify media mode */
231 u8 op_mode; /* real work media mode */
232 u8 phy_addr;
233 u8 link_failed; /* Ever link failed */
234 u8 wait_reset; /* Hardware failed, need to reset */
235 u8 dm910x_chk_mode; /* Operating mode check */
236 u8 first_in_callback; /* Flag to record state */
237 struct timer_list timer;
238
239 /* System defined statistic counter */
240 struct net_device_stats stats;
241
242 /* Driver defined statistic counter */
243 unsigned long tx_fifo_underrun;
244 unsigned long tx_loss_carrier;
245 unsigned long tx_no_carrier;
246 unsigned long tx_late_collision;
247 unsigned long tx_excessive_collision;
248 unsigned long tx_jabber_timeout;
249 unsigned long reset_count;
250 unsigned long reset_cr8;
251 unsigned long reset_fatal;
252 unsigned long reset_TXtimeout;
253
254 /* NIC SROM data */
255 unsigned char srom[128];
256};
257
258enum dmfe_offsets {
259 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
260 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
261 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
262 DCR15 = 0x78
263};
264
265enum dmfe_CR6_bits {
266 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
267 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
268 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
269};
270
271/* Global variable declaration ----------------------------- */
272static int __devinitdata printed_version;
273static char version[] __devinitdata =
274 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
275 DRV_VERSION " (" DRV_RELDATE ")\n";
276
277static int dmfe_debug;
278static unsigned char dmfe_media_mode = DMFE_AUTO;
279static u32 dmfe_cr6_user_set;
280
281/* For module input parameter */
282static int debug;
283static u32 cr6set;
284static unsigned char mode = 8;
285static u8 chkmode = 1;
286static u8 HPNA_mode; /* Default: Low Power/High Speed */
287static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
288static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
289static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
290static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
291 4: TX pause packet */
292
293
294/* function declaration ------------------------------------- */
295static int dmfe_open(struct DEVICE *);
296static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
297static int dmfe_stop(struct DEVICE *);
298static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
299static void dmfe_set_filter_mode(struct DEVICE *);
300static struct ethtool_ops netdev_ethtool_ops;
301static u16 read_srom_word(long ,int);
302static irqreturn_t dmfe_interrupt(int , void *, struct pt_regs *);
303#ifdef CONFIG_NET_POLL_CONTROLLER
304static void poll_dmfe (struct net_device *dev);
305#endif
306static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
307static void allocate_rx_buffer(struct dmfe_board_info *);
308static void update_cr6(u32, unsigned long);
309static void send_filter_frame(struct DEVICE * ,int);
310static void dm9132_id_table(struct DEVICE * ,int);
311static u16 phy_read(unsigned long, u8, u8, u32);
312static void phy_write(unsigned long, u8, u8, u16, u32);
313static void phy_write_1bit(unsigned long, u32);
314static u16 phy_read_1bit(unsigned long);
315static u8 dmfe_sense_speed(struct dmfe_board_info *);
316static void dmfe_process_mode(struct dmfe_board_info *);
317static void dmfe_timer(unsigned long);
318static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
319static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
320static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
321static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
322static void dmfe_dynamic_reset(struct DEVICE *);
323static void dmfe_free_rxbuffer(struct dmfe_board_info *);
324static void dmfe_init_dm910x(struct DEVICE *);
325static void dmfe_parse_srom(struct dmfe_board_info *);
326static void dmfe_program_DM9801(struct dmfe_board_info *, int);
327static void dmfe_program_DM9802(struct dmfe_board_info *);
328static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
329static void dmfe_set_phyxcer(struct dmfe_board_info *);
330
331/* DM910X network board routine ---------------------------- */
332
333/*
334 * Search DM910X board, allocate space and register it
335 */
336
337static int __devinit dmfe_init_one (struct pci_dev *pdev,
338 const struct pci_device_id *ent)
339{
340 struct dmfe_board_info *db; /* board information structure */
341 struct net_device *dev;
342 u32 dev_rev, pci_pmr;
343 int i, err;
344
345 DMFE_DBUG(0, "dmfe_init_one()", 0);
346
347 if (!printed_version++)
348 printk(version);
349
350 /* Init network device */
351 dev = alloc_etherdev(sizeof(*db));
352 if (dev == NULL)
353 return -ENOMEM;
354 SET_MODULE_OWNER(dev);
355 SET_NETDEV_DEV(dev, &pdev->dev);
356
357 if (pci_set_dma_mask(pdev, 0xffffffff)) {
358 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
359 err = -ENODEV;
360 goto err_out_free;
361 }
362
363 /* Enable Master/IO access, Disable memory access */
364 err = pci_enable_device(pdev);
365 if (err)
366 goto err_out_free;
367
368 if (!pci_resource_start(pdev, 0)) {
369 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
370 err = -ENODEV;
371 goto err_out_disable;
372 }
373
374 /* Read Chip revision */
375 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
376
377 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
378 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
379 err = -ENODEV;
380 goto err_out_disable;
381 }
382
383#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
384
385 /* Set Latency Timer 80h */
386 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
387 Need a PCI quirk.. */
388
389 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
390#endif
391
392 if (pci_request_regions(pdev, DRV_NAME)) {
393 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
394 err = -ENODEV;
395 goto err_out_disable;
396 }
397
398 /* Init system & device */
399 db = netdev_priv(dev);
400
401 /* Allocate Tx/Rx descriptor memory */
402 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
403 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
404
405 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
406 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
407 db->buf_pool_start = db->buf_pool_ptr;
408 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
409
410 db->chip_id = ent->driver_data;
411 db->ioaddr = pci_resource_start(pdev, 0);
412 db->chip_revision = dev_rev;
413
414 db->pdev = pdev;
415
416 dev->base_addr = db->ioaddr;
417 dev->irq = pdev->irq;
418 pci_set_drvdata(pdev, dev);
419 dev->open = &dmfe_open;
420 dev->hard_start_xmit = &dmfe_start_xmit;
421 dev->stop = &dmfe_stop;
422 dev->get_stats = &dmfe_get_stats;
423 dev->set_multicast_list = &dmfe_set_filter_mode;
424#ifdef CONFIG_NET_POLL_CONTROLLER
425 dev->poll_controller = &poll_dmfe;
426#endif
427 dev->ethtool_ops = &netdev_ethtool_ops;
428 spin_lock_init(&db->lock);
429
430 pci_read_config_dword(pdev, 0x50, &pci_pmr);
431 pci_pmr &= 0x70000;
432 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
433 db->chip_type = 1; /* DM9102A E3 */
434 else
435 db->chip_type = 0;
436
437 /* read 64 word srom data */
438 for (i = 0; i < 64; i++)
439 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
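	/* The 64 words above fill the 128-byte srom[] cache; cpu_to_le16() stores
	 * each word little-endian, so the byte-indexed parsing in dmfe_parse_srom()
	 * (dev_addr bytes, le16/le32 fields) is independent of host endianness. */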
440
441 /* Set Node address */
442 for (i = 0; i < 6; i++)
443 dev->dev_addr[i] = db->srom[20 + i];
444
445 err = register_netdev (dev);
446 if (err)
447 goto err_out_res;
448
449 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
450 dev->name,
451 ent->driver_data >> 16,
452 pci_name(pdev));
453 for (i = 0; i < 6; i++)
454 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
455 printk(", irq %d.\n", dev->irq);
456
457 pci_set_master(pdev);
458
459 return 0;
460
461err_out_res:
462 pci_release_regions(pdev);
463err_out_disable:
464 pci_disable_device(pdev);
465err_out_free:
466 pci_set_drvdata(pdev, NULL);
467 free_netdev(dev);
468
469 return err;
470}
471
472
473static void __devexit dmfe_remove_one (struct pci_dev *pdev)
474{
475 struct net_device *dev = pci_get_drvdata(pdev);
476 struct dmfe_board_info *db = netdev_priv(dev);
477
478 DMFE_DBUG(0, "dmfe_remove_one()", 0);
479
480 if (dev) {
481 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
482 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
483 db->desc_pool_dma_ptr);
484 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
485 db->buf_pool_ptr, db->buf_pool_dma_ptr);
486 unregister_netdev(dev);
487 pci_release_regions(pdev);
488 free_netdev(dev); /* free board information */
489 pci_set_drvdata(pdev, NULL);
490 }
491
492 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
493}
494
495
496/*
497 * Open the interface.
498 * The interface is opened whenever "ifconfig" activates it.
499 */
500
501static int dmfe_open(struct DEVICE *dev)
502{
503 int ret;
504 struct dmfe_board_info *db = netdev_priv(dev);
505
506 DMFE_DBUG(0, "dmfe_open", 0);
507
508 ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
509 if (ret)
510 return ret;
511
512 /* system variable init */
513 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
514 db->tx_packet_cnt = 0;
515 db->tx_queue_cnt = 0;
516 db->rx_avail_cnt = 0;
517 db->link_failed = 1;
518 db->wait_reset = 0;
519
520 db->first_in_callback = 0;
521 db->NIC_capability = 0xf; /* All capability*/
522 db->PHY_reg4 = 0x1e0;
523
524 /* CR6 operation mode decision */
525 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
526 (db->chip_revision >= 0x02000030) ) {
527 db->cr6_data |= DMFE_TXTH_256;
528 db->cr0_data = CR0_DEFAULT;
529 db->dm910x_chk_mode=4; /* Enter the normal mode */
530 } else {
531 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
532 db->cr0_data = 0;
533 db->dm910x_chk_mode = 1; /* Enter the check mode */
534 }
535
536 /* Initialize DM910X board */
537 dmfe_init_dm910x(dev);
538
539 /* Activate System Interface */
540 netif_wake_queue(dev);
541
542 /* set and activate a timer process */
543 init_timer(&db->timer);
544 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
545 db->timer.data = (unsigned long)dev;
546 db->timer.function = &dmfe_timer;
547 add_timer(&db->timer);
548
549 return 0;
550}
551
552
553/* Initialize DM910X board
554 * Reset DM910X board
555 * Initialize TX/Rx descriptor chain structure
556 * Send the set-up frame
557 * Enable Tx/Rx machine
558 */
559
560static void dmfe_init_dm910x(struct DEVICE *dev)
561{
562 struct dmfe_board_info *db = netdev_priv(dev);
563 unsigned long ioaddr = db->ioaddr;
564
565 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
566
567 /* Reset DM910x MAC controller */
568 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
569 udelay(100);
570 outl(db->cr0_data, ioaddr + DCR0);
571 udelay(5);
572
573 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
574 db->phy_addr = 1;
575
576 /* Parse SROM and media mode */
577 dmfe_parse_srom(db);
578 db->media_mode = dmfe_media_mode;
579
580 /* RESET Phyxcer Chip by GPR port bit 7 */
581 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
582 if (db->chip_id == PCI_DM9009_ID) {
583 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
584 mdelay(300); /* Delay 300 ms */
585 }
586 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
587
588 /* Process Phyxcer Media Mode */
589 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
590 dmfe_set_phyxcer(db);
591
592 /* Media Mode Process */
593 if ( !(db->media_mode & DMFE_AUTO) )
594 db->op_mode = db->media_mode; /* Force Mode */
595
596 /* Initialize Transmit/Receive descriptors and CR3/4 */
597 dmfe_descriptor_init(db, ioaddr);
598
599 /* Init CR6 to program DM910x operation */
600 update_cr6(db->cr6_data, ioaddr);
601
602 /* Send setup frame */
603 if (db->chip_id == PCI_DM9132_ID)
604 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
605 else
606 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
607
608 /* Init CR7, interrupt active bit */
609 db->cr7_data = CR7_DEFAULT;
610 outl(db->cr7_data, ioaddr + DCR7);
611
612 /* Init CR15, Tx jabber and Rx watchdog timer */
613 outl(db->cr15_data, ioaddr + DCR15);
614
615 /* Enable DM910X Tx/Rx function */
616 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
617 update_cr6(db->cr6_data, ioaddr);
618}
619
620
621/*
622 * Hardware start transmission.
623 * Send a packet to media from the upper layer.
624 */
625
626static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
627{
628 struct dmfe_board_info *db = netdev_priv(dev);
629 struct tx_desc *txptr;
630 unsigned long flags;
631
632 DMFE_DBUG(0, "dmfe_start_xmit", 0);
633
634 /* Resource flag check */
635 netif_stop_queue(dev);
636
637 /* Too large packet check */
638 if (skb->len > MAX_PACKET_SIZE) {
639 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
640 dev_kfree_skb(skb);
641 return 0;
642 }
643
644 spin_lock_irqsave(&db->lock, flags);
645
646 /* No Tx resource check, it never happens normally */
647 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
648 spin_unlock_irqrestore(&db->lock, flags);
649 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
650 return 1;
651 }
652
653 /* Disable NIC interrupt */
654 outl(0, dev->base_addr + DCR7);
655
656 /* transmit this packet */
657 txptr = db->tx_insert_ptr;
658 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
659 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
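 /* 0xe1000000 | skb->len: the low bits carry the frame length; bit 31
  * (interrupt on completion) and bit 24 (chain) match the 0x81000000 value
  * programmed in dmfe_descriptor_init(), and bits 30/29 appear to be the
  * last/first-segment flags, since each frame occupies a single buffer. */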
660
661 /* Point to next transmit free descriptor */
662 db->tx_insert_ptr = txptr->next_tx_desc;
663
664 /* Transmit Packet Process */
665 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
666 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
667 db->tx_packet_cnt++; /* Ready to send */
668 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
669 dev->trans_start = jiffies; /* saved time stamp */
670 } else {
671 db->tx_queue_cnt++; /* queue TX packet */
672 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
673 }
674
675 /* Tx resource check */
676 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
677 netif_wake_queue(dev);
678
679 /* Restore CR7 to enable interrupt */
680 spin_unlock_irqrestore(&db->lock, flags);
681 outl(db->cr7_data, dev->base_addr + DCR7);
682
683 /* free this SKB */
684 dev_kfree_skb(skb);
685
686 return 0;
687}
688
689
690/*
691 * Stop the interface.
692 * The interface is stopped when it is brought down.
693 */
694
695static int dmfe_stop(struct DEVICE *dev)
696{
697 struct dmfe_board_info *db = netdev_priv(dev);
698 unsigned long ioaddr = dev->base_addr;
699
700 DMFE_DBUG(0, "dmfe_stop", 0);
701
702 /* disable system */
703 netif_stop_queue(dev);
704
705 /* delete the timer */
706 del_timer_sync(&db->timer);
707
708 /* Reset & stop DM910X board */
709 outl(DM910X_RESET, ioaddr + DCR0);
710 udelay(5);
711 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
712
713 /* free interrupt */
714 free_irq(dev->irq, dev);
715
716 /* free allocated rx buffer */
717 dmfe_free_rxbuffer(db);
718
719#if 0
720 /* show statistic counter */
721 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
722 db->tx_fifo_underrun, db->tx_excessive_collision,
723 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
724 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
725 db->reset_fatal, db->reset_TXtimeout);
726#endif
727
728 return 0;
729}
730
731
732/*
733 * DM9102 interrupt handler
734 * Pass received packets to the upper layer and free transmitted packets
735 */
736
737static irqreturn_t dmfe_interrupt(int irq, void *dev_id, struct pt_regs *regs)
738{
739 struct DEVICE *dev = dev_id;
740 struct dmfe_board_info *db = netdev_priv(dev);
741 unsigned long ioaddr = dev->base_addr;
742 unsigned long flags;
743
744 DMFE_DBUG(0, "dmfe_interrupt()", 0);
745
746 if (!dev) {
747 DMFE_DBUG(1, "dmfe_interrupt() without DEVICE arg", 0);
748 return IRQ_NONE;
749 }
750
751 spin_lock_irqsave(&db->lock, flags);
752
753 /* Got DM910X status */
754 db->cr5_data = inl(ioaddr + DCR5);
755 outl(db->cr5_data, ioaddr + DCR5);
756 if ( !(db->cr5_data & 0xc1) ) {
757 spin_unlock_irqrestore(&db->lock, flags);
758 return IRQ_HANDLED;
759 }
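 /* Writing the status word back to CR5 acknowledges the events just read
  * (the status bits appear to be write-one-to-clear); the 0xc1 test bails out
  * early unless at least one of the bits this handler reacts to (0x01 Tx
  * done, 0x40 Rx done, or 0x80) is pending. */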
760
761 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
762 outl(0, ioaddr + DCR7);
763
764 /* Check system status */
765 if (db->cr5_data & 0x2000) {
766 /* system bus error occurred */
767 DMFE_DBUG(1, "System bus error happened. CR5=", db->cr5_data);
768 db->reset_fatal++;
769 db->wait_reset = 1; /* Need to RESET */
770 spin_unlock_irqrestore(&db->lock, flags);
771 return IRQ_HANDLED;
772 }
773
774 /* Receive the incoming packet */
775 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
776 dmfe_rx_packet(dev, db);
777
778 /* reallocate rx descriptor buffer */
779 if (db->rx_avail_cnt<RX_DESC_CNT)
780 allocate_rx_buffer(db);
781
782 /* Free the transmitted descriptor */
783 if ( db->cr5_data & 0x01)
784 dmfe_free_tx_pkt(dev, db);
785
786 /* Mode Check */
787 if (db->dm910x_chk_mode & 0x2) {
788 db->dm910x_chk_mode = 0x4;
789 db->cr6_data |= 0x100;
790 update_cr6(db->cr6_data, db->ioaddr);
791 }
792
793 /* Restore CR7 to enable interrupt mask */
794 outl(db->cr7_data, ioaddr + DCR7);
795
796 spin_unlock_irqrestore(&db->lock, flags);
797 return IRQ_HANDLED;
798}
799
800
801#ifdef CONFIG_NET_POLL_CONTROLLER
802/*
803 * Polling 'interrupt' - used by things like netconsole to send skbs
804 * without having to re-enable interrupts. It's not called while
805 * the interrupt routine is executing.
806 */
807
808static void poll_dmfe (struct net_device *dev)
809{
810 /* disable_irq here is not very nice, but with the lockless
811 interrupt handler we have no other choice. */
812 disable_irq(dev->irq);
813 dmfe_interrupt (dev->irq, dev, NULL);
814 enable_irq(dev->irq);
815}
816#endif
817
818/*
819 * Free TX resource after TX complete
820 */
821
822static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
823{
824 struct tx_desc *txptr;
825 unsigned long ioaddr = dev->base_addr;
826 u32 tdes0;
827
828 txptr = db->tx_remove_ptr;
829 while(db->tx_packet_cnt) {
830 tdes0 = le32_to_cpu(txptr->tdes0);
831 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
832 if (tdes0 & 0x80000000)
833 break;
834
835 /* A packet transmission completed */
836 db->tx_packet_cnt--;
837 db->stats.tx_packets++;
838
839 /* Transmit statistic counter */
840 if ( tdes0 != 0x7fffffff ) {
841 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
842 db->stats.collisions += (tdes0 >> 3) & 0xf;
843 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
844 if (tdes0 & TDES0_ERR_MASK) {
845 db->stats.tx_errors++;
846
847 if (tdes0 & 0x0002) { /* UnderRun */
848 db->tx_fifo_underrun++;
849 if ( !(db->cr6_data & CR6_SFT) ) {
850 db->cr6_data = db->cr6_data | CR6_SFT;
851 update_cr6(db->cr6_data, db->ioaddr);
852 }
853 }
854 if (tdes0 & 0x0100)
855 db->tx_excessive_collision++;
856 if (tdes0 & 0x0200)
857 db->tx_late_collision++;
858 if (tdes0 & 0x0400)
859 db->tx_no_carrier++;
860 if (tdes0 & 0x0800)
861 db->tx_loss_carrier++;
862 if (tdes0 & 0x4000)
863 db->tx_jabber_timeout++;
864 }
865 }
866
867 txptr = txptr->next_tx_desc;
868 }/* End of while */
869
870 /* Update TX remove pointer to next */
871 db->tx_remove_ptr = txptr;
872
873 /* Send the Tx packet in queue */
874 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
875 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
876 db->tx_packet_cnt++; /* Ready to send */
877 db->tx_queue_cnt--;
878 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
879 dev->trans_start = jiffies; /* saved time stamp */
880 }
881
882 /* Resource available check */
883 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
884 netif_wake_queue(dev); /* Active upper layer, send again */
885}
886
887
888/*
889 * Calculate the CRC value of the Rx packet
890 * flag = 1 : return the reverse CRC (for the received packet CRC)
891 * 0 : return the normal CRC (for Hash Table index)
892 */
893
894static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
895{
896 u32 crc = crc32(~0, Data, Len);
897 if (flag) crc = ~crc;
898 return crc;
899}
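/* cal_CRC(addr, 6, 0) & 0x3f is what dm9132_id_table() uses to pick one of
 * the 64 multicast hash bits; the flag = 1 (inverted) form is used in check
 * mode to compare against the CRC trailing a received frame. */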
900
901
902/*
903 * Receive the incoming packet and pass it to the upper layer
904 */
905
906static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
907{
908 struct rx_desc *rxptr;
909 struct sk_buff *skb;
910 int rxlen;
911 u32 rdes0;
912
913 rxptr = db->rx_ready_ptr;
914
915 while(db->rx_avail_cnt) {
916 rdes0 = le32_to_cpu(rxptr->rdes0);
917 if (rdes0 & 0x80000000) /* packet owner check */
918 break;
919
920 db->rx_avail_cnt--;
921 db->interval_rx_cnt++;
922
923 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
924 if ( (rdes0 & 0x300) != 0x300) {
925 /* A packet without First/Last flag */
926 /* reuse this SKB */
927 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
928 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
929 } else {
930 /* A packet with First/Last flag */
931 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
932
933 /* error summary bit check */
934 if (rdes0 & 0x8000) {
935 /* This is an error packet */
936 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
937 db->stats.rx_errors++;
938 if (rdes0 & 1)
939 db->stats.rx_fifo_errors++;
940 if (rdes0 & 2)
941 db->stats.rx_crc_errors++;
942 if (rdes0 & 0x80)
943 db->stats.rx_length_errors++;
944 }
945
946 if ( !(rdes0 & 0x8000) ||
947 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
948 skb = rxptr->rx_skb_ptr;
949
950 /* Received Packet CRC check need or not */
951 if ( (db->dm910x_chk_mode & 1) &&
952 (cal_CRC(skb->tail, rxlen, 1) !=
953 (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
954 /* Found an erroneous received packet */
955 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
956 db->dm910x_chk_mode = 3;
957 } else {
958 /* Good packet, send to upper layer */
959 /* Short packets use a new SKB */
960 if ( (rxlen < RX_COPY_SIZE) &&
961 ( (skb = dev_alloc_skb(rxlen + 2) )
962 != NULL) ) {
963 /* size less than COPY_SIZE, allocate a rxlen SKB */
964 skb->dev = dev;
965 skb_reserve(skb, 2); /* 16byte align */
966 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
967 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
968 } else {
969 skb->dev = dev;
970 skb_put(skb, rxlen);
971 }
972 skb->protocol = eth_type_trans(skb, dev);
973 netif_rx(skb);
974 dev->last_rx = jiffies;
975 db->stats.rx_packets++;
976 db->stats.rx_bytes += rxlen;
977 }
978 } else {
979 /* Reuse SKB buffer when the packet is error */
980 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
981 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
982 }
983 }
984
985 rxptr = rxptr->next_rx_desc;
986 }
987
988 db->rx_ready_ptr = rxptr;
989}
990
991
992/*
993 * Get statistics from driver.
994 */
995
996static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
997{
998 struct dmfe_board_info *db = netdev_priv(dev);
999
1000 DMFE_DBUG(0, "dmfe_get_stats", 0);
1001 return &db->stats;
1002}
1003
1004
1005/*
1006 * Set DM910X multicast address
1007 */
1008
1009static void dmfe_set_filter_mode(struct DEVICE * dev)
1010{
1011 struct dmfe_board_info *db = netdev_priv(dev);
1012 unsigned long flags;
1013
1014 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1015 spin_lock_irqsave(&db->lock, flags);
1016
1017 if (dev->flags & IFF_PROMISC) {
1018 DMFE_DBUG(0, "Enable PROM Mode", 0);
1019 db->cr6_data |= CR6_PM | CR6_PBF;
1020 update_cr6(db->cr6_data, db->ioaddr);
1021 spin_unlock_irqrestore(&db->lock, flags);
1022 return;
1023 }
1024
1025 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1026 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1027 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1028 db->cr6_data |= CR6_PAM;
1029 spin_unlock_irqrestore(&db->lock, flags);
1030 return;
1031 }
1032
1033 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1034 if (db->chip_id == PCI_DM9132_ID)
1035 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1036 else
1037 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1038 spin_unlock_irqrestore(&db->lock, flags);
1039}
1040
1041static void netdev_get_drvinfo(struct net_device *dev,
1042 struct ethtool_drvinfo *info)
1043{
1044 struct dmfe_board_info *np = netdev_priv(dev);
1045
1046 strcpy(info->driver, DRV_NAME);
1047 strcpy(info->version, DRV_VERSION);
1048 if (np->pdev)
1049 strcpy(info->bus_info, pci_name(np->pdev));
1050 else
1051 sprintf(info->bus_info, "EISA 0x%lx %d",
1052 dev->base_addr, dev->irq);
1053}
1054
1055static struct ethtool_ops netdev_ethtool_ops = {
1056 .get_drvinfo = netdev_get_drvinfo,
1057};
1058
1059/*
1060 * A periodic timer routine
1061 * Dynamic media sense, allocate Rx buffer...
1062 */
1063
1064static void dmfe_timer(unsigned long data)
1065{
1066 u32 tmp_cr8;
1067 unsigned char tmp_cr12;
1068 struct DEVICE *dev = (struct DEVICE *) data;
1069 struct dmfe_board_info *db = netdev_priv(dev);
1070 unsigned long flags;
1071
1072 DMFE_DBUG(0, "dmfe_timer()", 0);
1073 spin_lock_irqsave(&db->lock, flags);
1074
1075 /* Media mode process when link is OK, before entering this routine */
1076 if (db->first_in_callback == 0) {
1077 db->first_in_callback = 1;
1078 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1079 db->cr6_data &= ~0x40000;
1080 update_cr6(db->cr6_data, db->ioaddr);
1081 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1082 db->cr6_data |= 0x40000;
1083 update_cr6(db->cr6_data, db->ioaddr);
1084 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1085 add_timer(&db->timer);
1086 spin_unlock_irqrestore(&db->lock, flags);
1087 return;
1088 }
1089 }
1090
1091
1092 /* Operating Mode Check */
1093 if ( (db->dm910x_chk_mode & 0x1) &&
1094 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1095 db->dm910x_chk_mode = 0x4;
1096
1097 /* Dynamic reset DM910X : system error or transmit time-out */
1098 tmp_cr8 = inl(db->ioaddr + DCR8);
1099 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1100 db->reset_cr8++;
1101 db->wait_reset = 1;
1102 }
1103 db->interval_rx_cnt = 0;
1104
1105 /* TX polling kick monitor */
1106 if ( db->tx_packet_cnt &&
1107 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1108 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1109
1110 /* TX Timeout */
1111 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1112 db->reset_TXtimeout++;
1113 db->wait_reset = 1;
1114 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1115 dev->name);
1116 }
1117 }
1118
1119 if (db->wait_reset) {
1120 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1121 db->reset_count++;
1122 dmfe_dynamic_reset(dev);
1123 db->first_in_callback = 0;
1124 db->timer.expires = DMFE_TIMER_WUT;
1125 add_timer(&db->timer);
1126 spin_unlock_irqrestore(&db->lock, flags);
1127 return;
1128 }
1129
1130 /* Link status check, Dynamic media type change */
1131 if (db->chip_id == PCI_DM9132_ID)
1132 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1133 else
1134 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1135
1136 if ( ((db->chip_id == PCI_DM9102_ID) &&
1137 (db->chip_revision == 0x02000030)) ||
1138 ((db->chip_id == PCI_DM9132_ID) &&
1139 (db->chip_revision == 0x02000010)) ) {
1140 /* DM9102A Chip */
1141 if (tmp_cr12 & 2)
1142 tmp_cr12 = 0x0; /* Link failed */
1143 else
1144 tmp_cr12 = 0x3; /* Link OK */
1145 }
1146
1147 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1148 /* Link Failed */
1149 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1150 db->link_failed = 1;
1151
1152 /* For forced 10/100M Half/Full mode: Enable Auto-Nego mode */
1153 /* Not needed for AUTO or forced 1M HomeRun/LongRun */
1154 if ( !(db->media_mode & 0x38) )
1155 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1156
1157 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1158 if (db->media_mode & DMFE_AUTO) {
1159 /* 10/100M link failed, use 1M Home-Net */
1160 db->cr6_data|=0x00040000; /* bit18=1, MII */
1161 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1162 update_cr6(db->cr6_data, db->ioaddr);
1163 }
1164 } else
1165 if ((tmp_cr12 & 0x3) && db->link_failed) {
1166 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1167 db->link_failed = 0;
1168
1169 /* Auto Sense Speed */
1170 if ( (db->media_mode & DMFE_AUTO) &&
1171 dmfe_sense_speed(db) )
1172 db->link_failed = 1;
1173 dmfe_process_mode(db);
1174 /* SHOW_MEDIA_TYPE(db->op_mode); */
1175 }
1176
1177 /* HPNA remote command check */
1178 if (db->HPNA_command & 0xf00) {
1179 db->HPNA_timer--;
1180 if (!db->HPNA_timer)
1181 dmfe_HPNA_remote_cmd_chk(db);
1182 }
1183
1184 /* Timer active again */
1185 db->timer.expires = DMFE_TIMER_WUT;
1186 add_timer(&db->timer);
1187 spin_unlock_irqrestore(&db->lock, flags);
1188}
1189
1190
1191/*
1192 * Dynamically reset the DM910X board:
1193 * Stop DM910X board
1194 * Free Tx/Rx allocated memory
1195 * Reset DM910X board
1196 * Re-initialize DM910X board
1197 */
1198
1199static void dmfe_dynamic_reset(struct DEVICE *dev)
1200{
1201 struct dmfe_board_info *db = netdev_priv(dev);
1202
1203 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1204
1205 /* Stop MAC controller */
1206 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1207 update_cr6(db->cr6_data, dev->base_addr);
1208 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1209 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1210
1211 /* Disable upper layer interface */
1212 netif_stop_queue(dev);
1213
1214 /* Free Rx Allocate buffer */
1215 dmfe_free_rxbuffer(db);
1216
1217 /* system variable init */
1218 db->tx_packet_cnt = 0;
1219 db->tx_queue_cnt = 0;
1220 db->rx_avail_cnt = 0;
1221 db->link_failed = 1;
1222 db->wait_reset = 0;
1223
1224 /* Re-initialize DM910X board */
1225 dmfe_init_dm910x(dev);
1226
1227 /* Restart upper layer interface */
1228 netif_wake_queue(dev);
1229}
1230
1231
1232/*
1233 * free all allocated rx buffer
1234 */
1235
1236static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1237{
1238 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1239
1240 /* free allocated rx buffer */
1241 while (db->rx_avail_cnt) {
1242 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1243 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1244 db->rx_avail_cnt--;
1245 }
1246}
1247
1248
1249/*
1250 * Reuse the SK buffer
1251 */
1252
1253static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1254{
1255 struct rx_desc *rxptr = db->rx_insert_ptr;
1256
1257 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1258 rxptr->rx_skb_ptr = skb;
1259 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1260 wmb();
1261 rxptr->rdes0 = cpu_to_le32(0x80000000);
1262 db->rx_avail_cnt++;
1263 db->rx_insert_ptr = rxptr->next_rx_desc;
1264 } else
1265 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1266}
1267
1268
1269/*
1270 * Initialize Transmit/Receive descriptors
1271 * using a chain structure, and allocate Tx/Rx buffers
1272 */
1273
1274static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1275{
1276 struct tx_desc *tmp_tx;
1277 struct rx_desc *tmp_rx;
1278 unsigned char *tmp_buf;
1279 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1280 dma_addr_t tmp_buf_dma;
1281 int i;
1282
1283 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1284
1285 /* tx descriptor start pointer */
1286 db->tx_insert_ptr = db->first_tx_desc;
1287 db->tx_remove_ptr = db->first_tx_desc;
1288 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1289
1290 /* rx descriptor start pointer */
1291 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1292 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
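 /* Both rings live in the single coherent pool allocated in dmfe_init_one():
  * TX_DESC_CNT tx_desc slots followed immediately by RX_DESC_CNT rx_desc
  * slots (both structs pad to the same 32-byte size, so
  * DESC_ALL_CNT * sizeof(struct tx_desc) covers the whole area). */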
1293 db->rx_insert_ptr = db->first_rx_desc;
1294 db->rx_ready_ptr = db->first_rx_desc;
1295 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1296
1297 /* Init Transmit chain */
1298 tmp_buf = db->buf_pool_start;
1299 tmp_buf_dma = db->buf_pool_dma_start;
1300 tmp_tx_dma = db->first_tx_desc_dma;
1301 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1302 tmp_tx->tx_buf_ptr = tmp_buf;
1303 tmp_tx->tdes0 = cpu_to_le32(0);
1304 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1305 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1306 tmp_tx_dma += sizeof(struct tx_desc);
1307 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1308 tmp_tx->next_tx_desc = tmp_tx + 1;
1309 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1310 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1311 }
1312 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1313 tmp_tx->next_tx_desc = db->first_tx_desc;
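 /* The decrement backs up to the last Tx descriptor and points both its
  * chain address (tdes3) and next_tx_desc at the head again, closing the
  * ring; the Rx chain below is wrapped the same way. */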
1314
1315 /* Init Receive descriptor chain */
1316 tmp_rx_dma=db->first_rx_desc_dma;
1317 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1318 tmp_rx->rdes0 = cpu_to_le32(0);
1319 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1320 tmp_rx_dma += sizeof(struct rx_desc);
1321 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1322 tmp_rx->next_rx_desc = tmp_rx + 1;
1323 }
1324 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1325 tmp_rx->next_rx_desc = db->first_rx_desc;
1326
1327 /* pre-allocate Rx buffer */
1328 allocate_rx_buffer(db);
1329}
1330
1331
1332/*
1333 * Update CR6 value
1334 * First stop the DM910X, then write the value and restart
1335 */
1336
1337static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1338{
1339 u32 cr6_tmp;
1340
1341 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1342 outl(cr6_tmp, ioaddr + DCR6);
1343 udelay(5);
1344 outl(cr6_data, ioaddr + DCR6);
1345 udelay(5);
1346}
1347
1348
1349/*
1350 * Send a setup frame for DM9132
1351 * This setup frame initializes the DM910X address filter mode
1352*/
1353
1354static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1355{
1356 struct dev_mc_list *mcptr;
1357 u16 * addrptr;
1358 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1359 u32 hash_val;
1360 u16 i, hash_table[4];
1361
1362 DMFE_DBUG(0, "dm9132_id_table()", 0);
1363
1364 /* Node address */
1365 addrptr = (u16 *) dev->dev_addr;
1366 outw(addrptr[0], ioaddr);
1367 ioaddr += 4;
1368 outw(addrptr[1], ioaddr);
1369 ioaddr += 4;
1370 outw(addrptr[2], ioaddr);
1371 ioaddr += 4;
1372
1373 /* Clear Hash Table */
1374 for (i = 0; i < 4; i++)
1375 hash_table[i] = 0x0;
1376
1377 /* broadcast address */
1378 hash_table[3] = 0x8000;
1379
1380 /* the multicast address in Hash Table : 64 bits */
1381 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1382 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1383 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1384 }
1385
1386 /* Write the hash table to MAC MD table */
1387 for (i = 0; i < 4; i++, ioaddr += 4)
1388 outw(hash_table[i], ioaddr);
1389}
1390
1391
1392/*
1393 * Send a setup frame for DM9102/DM9102A
1394 * This setup frame initializes the DM910X address filter mode
1395 */
1396
1397static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1398{
1399 struct dmfe_board_info *db = netdev_priv(dev);
1400 struct dev_mc_list *mcptr;
1401 struct tx_desc *txptr;
1402 u16 * addrptr;
1403 u32 * suptr;
1404 int i;
1405
1406 DMFE_DBUG(0, "send_filter_frame()", 0);
1407
1408 txptr = db->tx_insert_ptr;
1409 suptr = (u32 *) txptr->tx_buf_ptr;
1410
1411 /* Node address */
1412 addrptr = (u16 *) dev->dev_addr;
1413 *suptr++ = addrptr[0];
1414 *suptr++ = addrptr[1];
1415 *suptr++ = addrptr[2];
1416
1417 /* broadcast address */
1418 *suptr++ = 0xffff;
1419 *suptr++ = 0xffff;
1420 *suptr++ = 0xffff;
1421
1422 /* fit the multicast address */
1423 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1424 addrptr = (u16 *) mcptr->dmi_addr;
1425 *suptr++ = addrptr[0];
1426 *suptr++ = addrptr[1];
1427 *suptr++ = addrptr[2];
1428 }
1429
1430 for (; i<14; i++) {
1431 *suptr++ = 0xffff;
1432 *suptr++ = 0xffff;
1433 *suptr++ = 0xffff;
1434 }
1435
1436 /* prepare the setup frame */
1437 db->tx_insert_ptr = txptr->next_tx_desc;
1438 txptr->tdes1 = cpu_to_le32(0x890000c0);
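 /* 0x890000c0: the low bits give the 192-byte setup-frame length (the 16
  * three-word address entries filled in above, 12 bytes each); the upper
  * bits presumably flag this descriptor as a setup/filter frame rather than
  * normal transmit data. */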
1439
1440 /* Resource Check and Send the setup packet */
1441 if (!db->tx_packet_cnt) {
1442 /* Resource Empty */
1443 db->tx_packet_cnt++;
1444 txptr->tdes0 = cpu_to_le32(0x80000000);
1445 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1446 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1447 update_cr6(db->cr6_data, dev->base_addr);
1448 dev->trans_start = jiffies;
1449 } else
1450 db->tx_queue_cnt++; /* Put in TX queue */
1451}
1452
1453
1454/*
1455 * Allocate Rx buffers,
1456 * filling as many free Rx descriptors as possible
1457 */
1458
1459static void allocate_rx_buffer(struct dmfe_board_info *db)
1460{
1461 struct rx_desc *rxptr;
1462 struct sk_buff *skb;
1463
1464 rxptr = db->rx_insert_ptr;
1465
1466 while(db->rx_avail_cnt < RX_DESC_CNT) {
1467 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1468 break;
1469 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1470 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1471 wmb();
1472 rxptr->rdes0 = cpu_to_le32(0x80000000);
1473 rxptr = rxptr->next_rx_desc;
1474 db->rx_avail_cnt++;
1475 }
1476
1477 db->rx_insert_ptr = rxptr;
1478}
1479
1480
1481/*
1482 * Read one word data from the serial ROM
1483 */
1484
1485static u16 read_srom_word(long ioaddr, int offset)
1486{
1487 int i;
1488 u16 srom_data = 0;
1489 long cr9_ioaddr = ioaddr + DCR9;
1490
1491 outl(CR9_SROM_READ, cr9_ioaddr);
1492 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1493
1494 /* Send the Read Command 110b */
1495 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1496 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1497 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1498
1499 /* Send the offset */
1500 for (i = 5; i >= 0; i--) {
1501 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1502 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1503 }
1504
1505 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1506
1507 for (i = 16; i > 0; i--) {
1508 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1509 udelay(5);
1510 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1511 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1512 udelay(5);
1513 }
1514
1515 outl(CR9_SROM_READ, cr9_ioaddr);
1516 return srom_data;
1517}
1518
1519
1520/*
1521 * Auto sense the media mode
1522 */
1523
1524static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1525{
1526 u8 ErrFlag = 0;
1527 u16 phy_mode;
1528
1529 /* CR6 bit18=0, select 10/100M */
1530 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1531
1532 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1533 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1534
1535 if ( (phy_mode & 0x24) == 0x24 ) {
1536 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1537 phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
1538 else /* DM9102/DM9102A */
1539 phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
1540 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1541 switch (phy_mode) {
1542 case 0x1000: db->op_mode = DMFE_10MHF; break;
1543 case 0x2000: db->op_mode = DMFE_10MFD; break;
1544 case 0x4000: db->op_mode = DMFE_100MHF; break;
1545 case 0x8000: db->op_mode = DMFE_100MFD; break;
1546 default: db->op_mode = DMFE_10MHF;
1547 ErrFlag = 1;
1548 break;
1549 }
1550 } else {
1551 db->op_mode = DMFE_10MHF;
1552 DMFE_DBUG(0, "Link Failed :", phy_mode);
1553 ErrFlag = 1;
1554 }
1555
1556 return ErrFlag;
1557}
1558
1559
1560/*
1561 * Set 10/100 phyxcer capability
1562 * AUTO mode : phyxcer register4 is NIC capability
1563 * Force mode: phyxcer register4 is the force media
1564 */
1565
1566static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1567{
1568 u16 phy_reg;
1569
1570 /* Select 10/100M phyxcer */
1571 db->cr6_data &= ~0x40000;
1572 update_cr6(db->cr6_data, db->ioaddr);
1573
1574 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1575 if (db->chip_id == PCI_DM9009_ID) {
1576 phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
1577 phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
1578 }
1579
1580 /* Phyxcer capability setting */
1581 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1582
1583 if (db->media_mode & DMFE_AUTO) {
1584 /* AUTO Mode */
1585 phy_reg |= db->PHY_reg4;
1586 } else {
1587 /* Force Mode */
1588 switch(db->media_mode) {
1589 case DMFE_10MHF: phy_reg |= 0x20; break;
1590 case DMFE_10MFD: phy_reg |= 0x40; break;
1591 case DMFE_100MHF: phy_reg |= 0x80; break;
1592 case DMFE_100MFD: phy_reg |= 0x100; break;
1593 }
1594 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1595 }
1596
1597 /* Write new capability to Phyxcer Reg4 */
1598 if ( !(phy_reg & 0x01e0)) {
1599 phy_reg|=db->PHY_reg4;
1600 db->media_mode|=DMFE_AUTO;
1601 }
1602 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1603
1604 /* Restart Auto-Negotiation */
1605 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1606 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1607 if ( !db->chip_type )
1608 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1609}
1610
1611
1612/*
1613 * Process op-mode
1614 * AUTO mode : PHY controller in Auto-negotiation Mode
1615 * Force mode: PHY controller in force mode with HUB
1616 * N-way force capability with SWITCH
1617 */
1618
1619static void dmfe_process_mode(struct dmfe_board_info *db)
1620{
1621 u16 phy_reg;
1622
1623 /* Full Duplex Mode Check */
1624 if (db->op_mode & 0x4)
1625 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1626 else
1627 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1628
1629 /* Transceiver Selection */
1630 if (db->op_mode & 0x10) /* 1M HomePNA */
1631 db->cr6_data |= 0x40000;/* External MII select */
1632 else
1633 db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1634
1635 update_cr6(db->cr6_data, db->ioaddr);
1636
1637 /* 10/100M phyxcer force mode needed */
1638 if ( !(db->media_mode & 0x18)) {
1639 /* Force Mode */
1640 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1641 if ( !(phy_reg & 0x1) ) {
1642 /* partner without N-Way capability */
1643 phy_reg = 0x0;
1644 switch(db->op_mode) {
1645 case DMFE_10MHF: phy_reg = 0x0; break;
1646 case DMFE_10MFD: phy_reg = 0x100; break;
1647 case DMFE_100MHF: phy_reg = 0x2000; break;
1648 case DMFE_100MFD: phy_reg = 0x2100; break;
1649 }
1650 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1651 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1652 mdelay(20);
1653 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1654 }
1655 }
1656}
1657
1658
1659/*
1660 * Write a word to Phy register
1661 */
1662
1663static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1664{
1665 u16 i;
1666 unsigned long ioaddr;
1667
1668 if (chip_id == PCI_DM9132_ID) {
1669 ioaddr = iobase + 0x80 + offset * 4;
1670 outw(phy_data, ioaddr);
1671 } else {
1672 /* DM9102/DM9102A Chip */
1673 ioaddr = iobase + DCR9;
1674
1675 /* Send 35 synchronization clock pulses to the PHY controller */
1676 for (i = 0; i < 35; i++)
1677 phy_write_1bit(ioaddr, PHY_DATA_1);
1678
1679 /* Send start command(01) to Phy */
1680 phy_write_1bit(ioaddr, PHY_DATA_0);
1681 phy_write_1bit(ioaddr, PHY_DATA_1);
1682
1683 /* Send write command(01) to Phy */
1684 phy_write_1bit(ioaddr, PHY_DATA_0);
1685 phy_write_1bit(ioaddr, PHY_DATA_1);
1686
1687 /* Send Phy address */
1688 for (i = 0x10; i > 0; i = i >> 1)
1689 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1690
1691 /* Send register address */
1692 for (i = 0x10; i > 0; i = i >> 1)
1693 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1694
1695 /* write transition (turnaround) */
1696 phy_write_1bit(ioaddr, PHY_DATA_1);
1697 phy_write_1bit(ioaddr, PHY_DATA_0);
1698
1699 /* Write a word data to PHY controller */
1700 for ( i = 0x8000; i > 0; i >>= 1)
1701 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1702 }
1703}
1704
1705
1706/*
1707 * Read a word data from phy register
1708 */
1709
1710static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1711{
1712 int i;
1713 u16 phy_data;
1714 unsigned long ioaddr;
1715
1716 if (chip_id == PCI_DM9132_ID) {
1717 /* DM9132 Chip */
1718 ioaddr = iobase + 0x80 + offset * 4;
1719 phy_data = inw(ioaddr);
1720 } else {
1721 /* DM9102/DM9102A Chip */
1722 ioaddr = iobase + DCR9;
1723
1724 /* Send 35 synchronization clock pulses to the PHY controller */
1725 for (i = 0; i < 35; i++)
1726 phy_write_1bit(ioaddr, PHY_DATA_1);
1727
1728 /* Send start command(01) to Phy */
1729 phy_write_1bit(ioaddr, PHY_DATA_0);
1730 phy_write_1bit(ioaddr, PHY_DATA_1);
1731
1732 /* Send read command(10) to Phy */
1733 phy_write_1bit(ioaddr, PHY_DATA_1);
1734 phy_write_1bit(ioaddr, PHY_DATA_0);
1735
1736 /* Send Phy address */
1737 for (i = 0x10; i > 0; i = i >> 1)
1738 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1739
1740 /* Send register address */
1741 for (i = 0x10; i > 0; i = i >> 1)
1742 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1743
1744 /* Skip transition state */
1745 phy_read_1bit(ioaddr);
1746
1747 /* read 16bit data */
1748 for (phy_data = 0, i = 0; i < 16; i++) {
1749 phy_data <<= 1;
1750 phy_data |= phy_read_1bit(ioaddr);
1751 }
1752 }
1753
1754 return phy_data;
1755}
1756
1757
1758/*
1759 * Write one bit data to Phy Controller
1760 */
1761
1762static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1763{
1764 outl(phy_data, ioaddr); /* MII Clock Low */
1765 udelay(1);
1766 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1767 udelay(1);
1768 outl(phy_data, ioaddr); /* MII Clock Low */
1769 udelay(1);
1770}
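/* The DM9102 exposes its MII management interface through CR9: PHY_DATA_0/1
 * drive the MDIO data line while MDCLKH toggles the MDC clock, so each call
 * shifts out one bit with the data held stable across a full low-high-low
 * clock pulse. */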
1771
1772
1773/*
1774 * Read one bit phy data from PHY controller
1775 */
1776
1777static u16 phy_read_1bit(unsigned long ioaddr)
1778{
1779 u16 phy_data;
1780
1781 outl(0x50000, ioaddr);
1782 udelay(1);
1783 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1784 outl(0x40000, ioaddr);
1785 udelay(1);
1786
1787 return phy_data;
1788}
1789
1790
1791/*
1792 * Parse SROM and media mode
1793 */
1794
1795static void dmfe_parse_srom(struct dmfe_board_info * db)
1796{
1797 char * srom = db->srom;
1798 int dmfe_mode, tmp_reg;
1799
1800 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1801
1802 /* Init CR15 */
1803 db->cr15_data = CR15_DEFAULT;
1804
1805 /* Check SROM Version */
1806 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1807 /* SROM V4.01 */
1808 /* Get NIC supported media modes */
1809 db->NIC_capability = le16_to_cpup(srom + 34);
1810 db->PHY_reg4 = 0;
1811 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1812 switch( db->NIC_capability & tmp_reg ) {
1813 case 0x1: db->PHY_reg4 |= 0x0020; break;
1814 case 0x2: db->PHY_reg4 |= 0x0040; break;
1815 case 0x4: db->PHY_reg4 |= 0x0080; break;
1816 case 0x8: db->PHY_reg4 |= 0x0100; break;
1817 }
1818 }
1819
1820 /* Media Mode Force or not check */
1821 dmfe_mode = le32_to_cpup(srom + 34) & le32_to_cpup(srom + 36);
1822 switch(dmfe_mode) {
1823 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1824 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1825 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1826 case 0x100:
1827 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1828 }
1829
1830 /* Special Function setting */
1831 /* VLAN function */
1832 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1833 db->cr15_data |= 0x40;
1834
1835 /* Flow Control */
1836 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1837 db->cr15_data |= 0x400;
1838
1839 /* TX pause packet */
1840 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1841 db->cr15_data |= 0x9800;
1842 }
1843
1844 /* Parse HPNA parameter */
1845 db->HPNA_command = 1;
1846
1847 /* Accept remote command or not */
1848 if (HPNA_rx_cmd == 0)
1849 db->HPNA_command |= 0x8000;
1850
1851 /* Issue remote command & operation mode */
1852 if (HPNA_tx_cmd == 1)
1853 switch(HPNA_mode) { /* Issue Remote Command */
1854 case 0: db->HPNA_command |= 0x0904; break;
1855 case 1: db->HPNA_command |= 0x0a00; break;
1856 case 2: db->HPNA_command |= 0x0506; break;
1857 case 3: db->HPNA_command |= 0x0602; break;
1858 }
1859 else
1860 switch(HPNA_mode) { /* Don't Issue */
1861 case 0: db->HPNA_command |= 0x0004; break;
1862 case 1: db->HPNA_command |= 0x0000; break;
1863 case 2: db->HPNA_command |= 0x0006; break;
1864 case 3: db->HPNA_command |= 0x0002; break;
1865 }
1866
1867 /* Check DM9801 or DM9802 present or not */
1868 db->HPNA_present = 0;
1869 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1870 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1871 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1872 /* DM9801 or DM9802 present */
1873 db->HPNA_timer = 8;
1874 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1875 /* DM9801 HomeRun */
1876 db->HPNA_present = 1;
1877 dmfe_program_DM9801(db, tmp_reg);
1878 } else {
1879 /* DM9802 LongRun */
1880 db->HPNA_present = 2;
1881 dmfe_program_DM9802(db);
1882 }
1883 }
1884
1885}
1886
1887
1888/*
1889 * Init HomeRun DM9801
1890 */
1891
1892static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1893{
1894 uint reg17, reg25;
1895
1896 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
1897 switch(HPNA_rev) {
1898 case 0xb900: /* DM9801 E3 */
1899 db->HPNA_command |= 0x1000;
1900 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
1901 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
1902 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1903 break;
1904 case 0xb901: /* DM9801 E4 */
1905 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1906 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
1907 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1908 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
1909 break;
1910 case 0xb902: /* DM9801 E5 */
1911 case 0xb903: /* DM9801 E6 */
1912 default:
1913 db->HPNA_command |= 0x1000;
1914 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1915 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
1916 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1917 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
1918 break;
1919 }
1920 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1921 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
1922 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
1923}
1924
1925
1926/*
1927 * Init HomeRun DM9802
1928 */
1929
1930static void dmfe_program_DM9802(struct dmfe_board_info * db)
1931{
1932 uint phy_reg;
1933
1934 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
1935 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1936 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1937 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
1938 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
1939}
1940
1941
1942/*
1943 * Check remote HPNA power and speed status. If not correct,
1944 * issue command again.
1945*/
1946
1947static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
1948{
1949 uint phy_reg;
1950
1951 /* Got remote device status */
1952 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
1953 switch(phy_reg) {
1954 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
1955 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
1956 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
1957 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
1958 }
1959
1960 /* Check whether the remote device status matches our setting or not */
1961 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
1962 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1963 db->HPNA_timer=8;
1964 } else
1965 db->HPNA_timer=600; /* Match: check again every 10 minutes */
1966}
1967
1968
1969
1970static struct pci_device_id dmfe_pci_tbl[] = {
1971 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
1972 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
1973 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
1974 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
1975 { 0, }
1976};
1977MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
1978
1979
1980static struct pci_driver dmfe_driver = {
1981 .name = "dmfe",
1982 .id_table = dmfe_pci_tbl,
1983 .probe = dmfe_init_one,
1984 .remove = __devexit_p(dmfe_remove_one),
1985};
1986
1987MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
1988MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
1989MODULE_LICENSE("GPL");
1990MODULE_VERSION(DRV_VERSION);
1991
1992module_param(debug, int, 0);
1993module_param(mode, byte, 0);
1994module_param(cr6set, int, 0);
1995module_param(chkmode, byte, 0);
1996module_param(HPNA_mode, byte, 0);
1997module_param(HPNA_rx_cmd, byte, 0);
1998module_param(HPNA_tx_cmd, byte, 0);
1999module_param(HPNA_NoiseFloor, byte, 0);
2000module_param(SF_mode, byte, 0);
2001MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2002MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2003MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2004
2005/* Description:
2006 * When the user loads the module with insmod, the system invokes init_module()
2007 * to initialize and register the driver.
2008 */
2009
2010static int __init dmfe_init_module(void)
2011{
2012 int rc;
2013
2014 printk(version);
2015 printed_version = 1;
2016
2017 DMFE_DBUG(0, "init_module() ", debug);
2018
2019 if (debug)
2020 dmfe_debug = debug; /* set debug flag */
2021 if (cr6set)
2022 dmfe_cr6_user_set = cr6set;
2023
2024 switch(mode) {
2025 case DMFE_10MHF:
2026 case DMFE_100MHF:
2027 case DMFE_10MFD:
2028 case DMFE_100MFD:
2029 case DMFE_1M_HPNA:
2030 dmfe_media_mode = mode;
2031 break;
2032 default:dmfe_media_mode = DMFE_AUTO;
2033 break;
2034 }
2035
2036 if (HPNA_mode > 4)
2037 HPNA_mode = 0; /* Default: LP/HS */
2038 if (HPNA_rx_cmd > 1)
2039 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2040 if (HPNA_tx_cmd > 1)
2041 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2042 if (HPNA_NoiseFloor > 15)
2043 HPNA_NoiseFloor = 0;
2044
2045 rc = pci_module_init(&dmfe_driver);
2046 if (rc < 0)
2047 return rc;
2048
2049 return 0;
2050}
2051
2052
2053/*
2054 * Description:
2055 * When the user removes the module with rmmod, the system invokes cleanup_module()
2056 * to unregister all registered services.
2057 */
2058
2059static void __exit dmfe_cleanup_module(void)
2060{
2061 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2062 pci_unregister_driver(&dmfe_driver);
2063}
2064
2065module_init(dmfe_init_module);
2066module_exit(dmfe_cleanup_module);