diff options
author | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-05-20 03:04:35 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2011-08-11 19:29:50 -0400 |
commit | 3401299a1b9e747cbf7de2cc0c8f6376c3cbe565 (patch) | |
tree | bdc229019b5a31d8b3bcf422055eccb261259ca4 /drivers/net/ethernet/dlink | |
parent | a8fe65b8f031c5c0a7414059773eaa962e5243cb (diff) |
de6*/dl2k/sundance: Move the D-Link drivers
Move the D-Link drivers into drivers/net/ethernet/dlink/ and
make the necessary Kconfig and Makefile changes.
CC: Bjorn Ekwall <bj0rn@blox.se>
CC: Donald Becker <becker@scyld.com>
CC: Edward Peng <edward_peng@dlink.com.tw>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/dlink')
-rw-r--r-- | drivers/net/ethernet/dlink/Kconfig | 84 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/Makefile | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/de600.c | 530 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/de600.h | 168 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/de620.c | 988 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/de620.h | 117 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/dl2k.c | 1824 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/dl2k.h | 554 | ||||
-rw-r--r-- | drivers/net/ethernet/dlink/sundance.c | 1940 |
9 files changed, 6213 insertions, 0 deletions
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig new file mode 100644 index 000000000000..9fdb66b66f15 --- /dev/null +++ b/drivers/net/ethernet/dlink/Kconfig | |||
@@ -0,0 +1,84 @@ | |||
1 | # | ||
2 | # D-Link device configuration | ||
3 | # | ||
4 | |||
5 | config NET_VENDOR_DLINK | ||
6 | bool "D-Link devices" | ||
7 | depends on PCI || PARPORT | ||
8 | ---help--- | ||
9 | If you have a network (Ethernet) card belonging to this class, say Y | ||
10 | and read the Ethernet-HOWTO, available from | ||
11 | <http://www.tldp.org/docs.html#howto>. | ||
12 | |||
13 | Note that the answer to this question doesn't directly affect the | ||
14 | kernel: saying N will just cause the configurator to skip all | ||
15 | the questions about D-Link devices. If you say Y, you will be asked for | ||
16 | your specific card in the following questions. | ||
17 | |||
18 | if NET_VENDOR_DLINK | ||
19 | |||
20 | config DE600 | ||
21 | tristate "D-Link DE600 pocket adapter support" | ||
22 | depends on PARPORT | ||
23 | ---help--- | ||
24 | This is a network (Ethernet) device which attaches to your parallel | ||
25 | port. Read <file:Documentation/networking/DLINK.txt> as well as the | ||
26 | Ethernet-HOWTO, available from | ||
27 | <http://www.tldp.org/docs.html#howto>, if you want to use | ||
28 | this. It is possible to have several devices share a single parallel | ||
29 | port and it is safe to compile the corresponding drivers into the | ||
30 | kernel. | ||
31 | |||
32 | To compile this driver as a module, choose M here: the module | ||
33 | will be called de600. | ||
34 | |||
35 | config DE620 | ||
36 | tristate "D-Link DE620 pocket adapter support" | ||
37 | depends on PARPORT | ||
38 | ---help--- | ||
39 | This is a network (Ethernet) device which attaches to your parallel | ||
40 | port. Read <file:Documentation/networking/DLINK.txt> as well as the | ||
41 | Ethernet-HOWTO, available from | ||
42 | <http://www.tldp.org/docs.html#howto>, if you want to use | ||
43 | this. It is possible to have several devices share a single parallel | ||
44 | port and it is safe to compile the corresponding drivers into the | ||
45 | kernel. | ||
46 | |||
47 | To compile this driver as a module, choose M here: the module | ||
48 | will be called de620. | ||
49 | |||
50 | config DL2K | ||
51 | tristate "DL2000/TC902x-based Gigabit Ethernet support" | ||
52 | depends on PCI | ||
53 | select CRC32 | ||
54 | ---help--- | ||
55 | This driver supports DL2000/TC902x-based Gigabit ethernet cards, | ||
56 | which includes | ||
57 | D-Link DGE-550T Gigabit Ethernet Adapter. | ||
58 | D-Link DL2000-based Gigabit Ethernet Adapter. | ||
59 | Sundance/Tamarack TC902x Gigabit Ethernet Adapter. | ||
60 | |||
61 | To compile this driver as a module, choose M here: the | ||
62 | module will be called dl2k. | ||
63 | |||
64 | config SUNDANCE | ||
65 | tristate "Sundance Alta support" | ||
66 | depends on PCI | ||
67 | select CRC32 | ||
68 | select MII | ||
69 | ---help--- | ||
70 | This driver is for the Sundance "Alta" chip. | ||
71 | More specific information and updates are available from | ||
72 | <http://www.scyld.com/network/sundance.html>. | ||
73 | |||
74 | config SUNDANCE_MMIO | ||
75 | bool "Use MMIO instead of PIO" | ||
76 | depends on SUNDANCE | ||
77 | ---help--- | ||
78 | Enable memory-mapped I/O for interaction with Sundance NIC registers. | ||
79 | Do NOT enable this by default, PIO (enabled when MMIO is disabled) | ||
80 | is known to solve bugs on certain chips. | ||
81 | |||
82 | If unsure, say N. | ||
83 | |||
84 | endif # NET_VENDOR_DLINK | ||
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile new file mode 100644 index 000000000000..c705eaa4f5b2 --- /dev/null +++ b/drivers/net/ethernet/dlink/Makefile | |||
@@ -0,0 +1,8 @@ | |||
#
# Makefile for the D-Link network device drivers.
#
# Each driver builds as a single object; whether it is built-in or a
# module is controlled by the matching Kconfig symbol.
#

obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c new file mode 100644 index 000000000000..23a65398d011 --- /dev/null +++ b/drivers/net/ethernet/dlink/de600.c | |||
@@ -0,0 +1,530 @@ | |||
1 | static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n"; | ||
2 | /* | ||
3 | * de600.c | ||
4 | * | ||
5 | * Linux driver for the D-Link DE-600 Ethernet pocket adapter. | ||
6 | * | ||
7 | * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall | ||
8 | * The Author may be reached as bj0rn@blox.se | ||
9 | * | ||
10 | * Based on adapter information gathered from DE600.ASM by D-Link Inc., | ||
11 | * as included on disk C in the v.2.11 of PC/TCP from FTP Software. | ||
12 | * For DE600.asm: | ||
13 | * Portions (C) Copyright 1990 D-Link, Inc. | ||
14 | * Copyright, 1988-1992, Russell Nelson, Crynwr Software | ||
15 | * | ||
16 | * Adapted to the sample network driver core for linux, | ||
17 | * written by: Donald Becker <becker@super.org> | ||
18 | * (Now at <becker@scyld.com>) | ||
19 | * | ||
20 | **************************************************************/ | ||
21 | /* | ||
22 | * This program is free software; you can redistribute it and/or modify | ||
23 | * it under the terms of the GNU General Public License as published by | ||
24 | * the Free Software Foundation; either version 2, or (at your option) | ||
25 | * any later version. | ||
26 | * | ||
27 | * This program is distributed in the hope that it will be useful, | ||
28 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
29 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
30 | * GNU General Public License for more details. | ||
31 | * | ||
32 | * You should have received a copy of the GNU General Public License | ||
33 | * along with this program; if not, write to the Free Software | ||
34 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
35 | * | ||
36 | **************************************************************/ | ||
37 | |||
38 | /* Add more time here if your adapter won't work OK: */ | ||
39 | #define DE600_SLOW_DOWN udelay(delay_time) | ||
40 | |||
41 | #include <linux/module.h> | ||
42 | #include <linux/kernel.h> | ||
43 | #include <linux/types.h> | ||
44 | #include <linux/fcntl.h> | ||
45 | #include <linux/string.h> | ||
46 | #include <linux/interrupt.h> | ||
47 | #include <linux/ioport.h> | ||
48 | #include <linux/in.h> | ||
49 | #include <asm/system.h> | ||
50 | #include <linux/errno.h> | ||
51 | #include <linux/init.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include <linux/inet.h> | ||
54 | #include <linux/netdevice.h> | ||
55 | #include <linux/etherdevice.h> | ||
56 | #include <linux/skbuff.h> | ||
57 | |||
58 | #include <asm/io.h> | ||
59 | |||
60 | #include "de600.h" | ||
61 | |||
62 | static unsigned int check_lost = 1; | ||
63 | module_param(check_lost, bool, 0); | ||
64 | MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600"); | ||
65 | |||
66 | static unsigned int delay_time = 10; | ||
67 | module_param(delay_time, int, 0); | ||
68 | MODULE_PARM_DESC(delay_time, "DE-600 deley on I/O in microseconds"); | ||
69 | |||
70 | |||
/*
 * D-Link driver variables:
 *
 * The DE-600 is handled as a single-instance device, so adapter state
 * lives in file-scope statics.  The transmit path and the interrupt
 * handler both touch them under de600_lock.
 */

static volatile int rx_page;	/* current receive-page selector / command high bits */

#define TX_PAGES 2
static volatile int tx_fifo[TX_PAGES];	/* start address of each queued tx packet */
static volatile int tx_fifo_in;		/* next fifo slot to fill */
static volatile int tx_fifo_out;	/* next fifo slot to transmit */
static volatile int free_tx_pages = TX_PAGES;	/* tx pages not currently queued */
static int was_down;	/* set when the adapter was found unplugged; cleared on recovery */
static DEFINE_SPINLOCK(de600_lock);	/* guards the state above and port access */
84 | |||
85 | static inline u8 de600_read_status(struct net_device *dev) | ||
86 | { | ||
87 | u8 status; | ||
88 | |||
89 | outb_p(STATUS, DATA_PORT); | ||
90 | status = inb(STATUS_PORT); | ||
91 | outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT); | ||
92 | |||
93 | return status; | ||
94 | } | ||
95 | |||
96 | static inline u8 de600_read_byte(unsigned char type, struct net_device *dev) | ||
97 | { | ||
98 | /* dev used by macros */ | ||
99 | u8 lo; | ||
100 | outb_p((type), DATA_PORT); | ||
101 | lo = ((unsigned char)inb(STATUS_PORT)) >> 4; | ||
102 | outb_p((type) | HI_NIBBLE, DATA_PORT); | ||
103 | return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Open/initialize the board. This is called (in the current kernel) | ||
108 | * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1). | ||
109 | * | ||
110 | * This routine should set everything up anew at each open, even | ||
111 | * registers that "should" only need to be set once at boot, so that | ||
112 | * there is a non-reboot way to recover if something goes wrong. | ||
113 | */ | ||
114 | |||
115 | static int de600_open(struct net_device *dev) | ||
116 | { | ||
117 | unsigned long flags; | ||
118 | int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev); | ||
119 | if (ret) { | ||
120 | printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ); | ||
121 | return ret; | ||
122 | } | ||
123 | spin_lock_irqsave(&de600_lock, flags); | ||
124 | ret = adapter_init(dev); | ||
125 | spin_unlock_irqrestore(&de600_lock, flags); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
/*
 * The inverse routine to de600_open(): reset and stop the adapter,
 * hand the parallel port back to the printer and release the IRQ.
 * Also called from adapter_init() when the adapter is found unplugged.
 */

static int de600_close(struct net_device *dev)
{
	select_nic();
	rx_page = 0;	/* de600_put_command() mixes rx_page into every command */
	de600_put_command(RESET);
	de600_put_command(STOP_RESET);
	de600_put_command(0);
	select_prn();
	free_irq(DE600_IRQ, dev);
	return 0;	/* always succeeds */
}
144 | |||
/*
 * Make the adapter raise another interrupt by flipping its IRQ line
 * while briefly switching the port back to printer mode.  Used by the
 * interrupt handler to resume a transmission that was still busy.
 */
static inline void trigger_interrupt(struct net_device *dev)
{
	de600_put_command(FLIP_IRQ);
	select_prn();
	DE600_SLOW_DOWN;
	select_nic();
	de600_put_command(0);
}
153 | |||
/*
 * Copy a buffer to the adapter transmit page memory.
 * Start sending.
 *
 * The adapter has TX_PAGES (2) transmit pages used as a small FIFO
 * (tx_fifo / tx_fifo_in / tx_fifo_out).  The packet is placed at the
 * top of its page (start address = page top - len) and padded with
 * zeros up to RUNT bytes if it is shorter.  Returns NETDEV_TX_OK on
 * success, NETDEV_TX_BUSY while the FIFO is full or the adapter had
 * to be re-initialised.
 */

static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	int transmit_from;
	int len;
	int tickssofar;
	u8 *buffer = skb->data;
	int i;

	if (free_tx_pages <= 0) {	/* Do timeouts, to avoid hangs. */
		tickssofar = jiffies - dev_trans_start(dev);
		if (tickssofar < HZ/20)
			return NETDEV_TX_BUSY;
		/* else */
		printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
		/* Restart the adapter. */
		spin_lock_irqsave(&de600_lock, flags);
		if (adapter_init(dev)) {
			spin_unlock_irqrestore(&de600_lock, flags);
			return NETDEV_TX_BUSY;
		}
		spin_unlock_irqrestore(&de600_lock, flags);
	}

	/* Start real output */
	pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);

	if ((len = skb->len) < RUNT)
		len = RUNT;	/* short frames are zero-padded below */

	spin_lock_irqsave(&de600_lock, flags);
	select_nic();
	tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
	tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */

	if(check_lost)
	{
		/* This costs about 40 instructions per packet... */
		/* Probe the ROM magic byte (0xde) to detect an unplugged adapter. */
		de600_setup_address(NODE_ADDRESS, RW_ADDR);
		de600_read_byte(READ_DATA, dev);	/* skip first ROM byte */
		if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
			if (adapter_init(dev)) {
				spin_unlock_irqrestore(&de600_lock, flags);
				return NETDEV_TX_BUSY;
			}
		}
	}

	/* Write the packet, then the pad bytes, nibble-wise over the port. */
	de600_setup_address(transmit_from, RW_ADDR);
	for (i = 0; i < skb->len ; ++i, ++buffer)
		de600_put_byte(*buffer);
	for (; i < len; ++i)
		de600_put_byte(0);

	if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
		dev->trans_start = jiffies;
		netif_start_queue(dev); /* allow more packets into adapter */
		/* Send page and generate a faked interrupt */
		de600_setup_address(transmit_from, TX_ADDR);
		de600_put_command(TX_ENABLE);
	}
	else {
		/* Transmission already running; queue completion continues it. */
		if (free_tx_pages)
			netif_start_queue(dev);
		else
			netif_stop_queue(dev);
		select_prn();
	}
	spin_unlock_irqrestore(&de600_lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
231 | |||
/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 *
 * Drains completed receives and services transmit completions in a
 * loop under de600_lock, then re-selects the printer side (which
 * re-enables adapter interrupts) and, if a transmit is still pending,
 * asks the adapter to interrupt again.
 */

static irqreturn_t de600_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u8 irq_status;
	int retrig = 0;
	int boguscount = 0;	/* bounds the busy-wait loop below */

	spin_lock(&de600_lock);

	select_nic();
	irq_status = de600_read_status(dev);

	do {
		pr_debug("de600_interrupt (%02X)\n", irq_status);

		if (irq_status & RX_GOOD)
			de600_rx_intr(dev);	/* a complete packet arrived */
		else if (!(irq_status & RX_BUSY))
			de600_put_command(RX_ENABLE);	/* receiver idle: re-arm it */

		/* Any transmission in progress? */
		if (free_tx_pages < TX_PAGES)
			retrig = de600_tx_intr(dev, irq_status);
		else
			retrig = 0;

		irq_status = de600_read_status(dev);
	} while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
	/*
	 * Yeah, it _looks_ like busy waiting, smells like busy waiting
	 * and I know it's not PC, but please, it will only occur once
	 * in a while and then only for a loop or so (< 1ms for sure!)
	 */

	/* Enable adapter interrupts */
	select_prn();
	if (retrig)
		trigger_interrupt(dev);
	spin_unlock(&de600_lock);
	return IRQ_HANDLED;
}
278 | |||
/*
 * Transmit-side interrupt service.
 *
 * Returns 1 if tx is still not done (the caller should retrigger the
 * adapter interrupt), 0 once all queued pages have been sent.
 * Called from de600_interrupt() with de600_lock held.
 */
static int de600_tx_intr(struct net_device *dev, int irq_status)
{
	/* Check if current transmission is done yet */
	if (irq_status & TX_BUSY)
		return 1; /* tx not done, try again */

	/* If last transmission OK then bump fifo index */
	if (!(irq_status & TX_FAILED16)) {
		tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
		++free_tx_pages;
		dev->stats.tx_packets++;
		netif_wake_queue(dev);
	}

	/* More to send, or resend last packet (after 16 collisions)? */
	if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
		dev->trans_start = jiffies;
		de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
		de600_put_command(TX_ENABLE);
		return 1;
	}

	return 0;
}
309 | |||
310 | /* | ||
311 | * We have a good packet, get it out of the adapter. | ||
312 | */ | ||
313 | static void de600_rx_intr(struct net_device *dev) | ||
314 | { | ||
315 | struct sk_buff *skb; | ||
316 | int i; | ||
317 | int read_from; | ||
318 | int size; | ||
319 | unsigned char *buffer; | ||
320 | |||
321 | /* Get size of received packet */ | ||
322 | size = de600_read_byte(RX_LEN, dev); /* low byte */ | ||
323 | size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */ | ||
324 | size -= 4; /* Ignore trailing 4 CRC-bytes */ | ||
325 | |||
326 | /* Tell adapter where to store next incoming packet, enable receiver */ | ||
327 | read_from = rx_page_adr(); | ||
328 | next_rx_page(); | ||
329 | de600_put_command(RX_ENABLE); | ||
330 | |||
331 | if ((size < 32) || (size > 1535)) { | ||
332 | printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size); | ||
333 | if (size > 10000) | ||
334 | adapter_init(dev); | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | skb = dev_alloc_skb(size+2); | ||
339 | if (skb == NULL) { | ||
340 | printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); | ||
341 | return; | ||
342 | } | ||
343 | /* else */ | ||
344 | |||
345 | skb_reserve(skb,2); /* Align */ | ||
346 | |||
347 | /* 'skb->data' points to the start of sk_buff data area. */ | ||
348 | buffer = skb_put(skb,size); | ||
349 | |||
350 | /* copy the packet into the buffer */ | ||
351 | de600_setup_address(read_from, RW_ADDR); | ||
352 | for (i = size; i > 0; --i, ++buffer) | ||
353 | *buffer = de600_read_byte(READ_DATA, dev); | ||
354 | |||
355 | skb->protocol=eth_type_trans(skb,dev); | ||
356 | |||
357 | netif_rx(skb); | ||
358 | |||
359 | /* update stats */ | ||
360 | dev->stats.rx_packets++; /* count all receives */ | ||
361 | dev->stats.rx_bytes += size; /* count all received bytes */ | ||
362 | |||
363 | /* | ||
364 | * If any worth-while packets have been received, netif_rx() | ||
365 | * will work on them when we get to the tasklets. | ||
366 | */ | ||
367 | } | ||
368 | |||
/*
 * Standard net_device callbacks.  MTU changes, MAC address setting and
 * address validation are delegated to the generic Ethernet helpers.
 */
static const struct net_device_ops de600_netdev_ops = {
	.ndo_open = de600_open,
	.ndo_stop = de600_close,
	.ndo_start_xmit = de600_start_xmit,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
377 | |||
378 | |||
/*
 * Probe for a DE-600 on the (compile-time fixed) parallel port at
 * DE600_IO: reset the adapter, read the station address from its ROM,
 * verify the D-Link magic bytes and register the net_device.
 *
 * Returns the registered device, or an ERR_PTR() on failure.
 */
static struct net_device * __init de600_probe(void)
{
	int i;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);


	if (!request_region(DE600_IO, 3, "de600")) {
		printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
		err = -EBUSY;
		goto out;
	}

	/* No trailing newline: the probe verdict is appended below. */
	printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
	/* Alpha testers must have the version number to report bugs. */
	pr_debug("%s", version);

	/* probe for adapter */
	err = -ENODEV;
	rx_page = 0;
	select_nic();
	(void)de600_read_status(dev);	/* discard stale status */
	de600_put_command(RESET);
	de600_put_command(STOP_RESET);
	if (de600_read_status(dev) & 0xf0) {
		printk(": not at I/O %#3x.\n", DATA_PORT);
		goto out1;
	}

	/*
	 * Maybe we found one,
	 * have to check if it is a D-Link DE-600 adapter...
	 */

	/* Get the adapter ethernet address from the ROM */
	de600_setup_address(NODE_ADDRESS, RW_ADDR);
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
		dev->broadcast[i] = 0xff;
	}

	/* Check magic code */
	if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
		/* OK, install real address (D-Link OUI 00:80:c8) */
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0xc8;
		dev->dev_addr[3] &= 0x0f;
		dev->dev_addr[3] |= 0x70;
	} else {
		printk(" not identified in the printer port\n");
		goto out1;
	}

	printk(", Ethernet Address: %pM\n", dev->dev_addr);

	dev->netdev_ops = &de600_netdev_ops;

	/* Receiver runs in broadcast+physical mode (RX_BP): no multicast. */
	dev->flags&=~IFF_MULTICAST;

	select_prn();

	err = register_netdev(dev);
	if (err)
		goto out1;

	return dev;

out1:
	release_region(DE600_IO, 3);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
457 | |||
/*
 * (Re)initialise the adapter: reset it, verify the ROM signature is
 * still readable (i.e. the pocket adapter is still plugged in),
 * restore the station address and re-enable the receiver.
 *
 * Returns 0 on success, 1 if the adapter has disappeared.
 * Callers (open, xmit timeout, xmit check_lost) hold de600_lock.
 */
static int adapter_init(struct net_device *dev)
{
	int i;

	select_nic();
	rx_page = 0; /* used by RESET */
	de600_put_command(RESET);
	de600_put_command(STOP_RESET);

	/* Check if it is still there... */
	/* Get the some bytes of the adapter ethernet address from the ROM */
	de600_setup_address(NODE_ADDRESS, RW_ADDR);
	de600_read_byte(READ_DATA, dev);	/* skip first ROM byte */
	if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
	    (de600_read_byte(READ_DATA, dev) != 0x15)) {
		/* was: if (de600_read_status(dev) & 0xf0) { */
		printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
		/* Goodbye, cruel world... */
		dev->flags &= ~IFF_UP;
		de600_close(dev);
		was_down = 1;	/* remembered so xmit keeps failing fast */
		netif_stop_queue(dev); /* Transmit busy... */
		return 1; /* failed */
	}

	if (was_down) {
		printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
		was_down = 0;
	}

	/* Empty the transmit fifo. */
	tx_fifo_in = 0;
	tx_fifo_out = 0;
	free_tx_pages = TX_PAGES;


	/* set the ether address. */
	de600_setup_address(NODE_ADDRESS, RW_ADDR);
	for (i = 0; i < ETH_ALEN; i++)
		de600_put_byte(dev->dev_addr[i]);

	/* where to start saving incoming packets */
	rx_page = RX_BP | RX_BASE_PAGE;
	de600_setup_address(MEM_4K, RW_ADDR);
	/* Enable receiver */
	de600_put_command(RX_ENABLE);
	select_prn();

	netif_start_queue(dev);

	return 0; /* OK */
}
509 | |||
510 | static struct net_device *de600_dev; | ||
511 | |||
512 | static int __init de600_init(void) | ||
513 | { | ||
514 | de600_dev = de600_probe(); | ||
515 | if (IS_ERR(de600_dev)) | ||
516 | return PTR_ERR(de600_dev); | ||
517 | return 0; | ||
518 | } | ||
519 | |||
/*
 * Module unload: tear down in reverse order of de600_probe() —
 * deregister the interface, release the claimed I/O ports, then free
 * the net_device.
 */
static void __exit de600_exit(void)
{
	unregister_netdev(de600_dev);
	release_region(DE600_IO, 3);
	free_netdev(de600_dev);
}

module_init(de600_init);
module_exit(de600_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h new file mode 100644 index 000000000000..e80ecbabcf4e --- /dev/null +++ b/drivers/net/ethernet/dlink/de600.h | |||
@@ -0,0 +1,168 @@ | |||
1 | /************************************************** | ||
2 | * * | ||
3 | * Definition of D-Link Ethernet Pocket adapter * | ||
4 | * * | ||
5 | **************************************************/ | ||
6 | /* | ||
7 | * D-Link Ethernet pocket adapter ports | ||
8 | */ | ||
9 | /* | ||
10 | * OK, so I'm cheating, but there are an awful lot of | ||
11 | * reads and writes in order to get anything in and out | ||
12 | * of the DE-600 with 4 bits at a time in the parallel port, | ||
13 | * so every saved instruction really helps :-) | ||
14 | */ | ||
15 | |||
16 | #ifndef DE600_IO | ||
17 | #define DE600_IO 0x378 | ||
18 | #endif | ||
19 | |||
20 | #define DATA_PORT (DE600_IO) | ||
21 | #define STATUS_PORT (DE600_IO + 1) | ||
22 | #define COMMAND_PORT (DE600_IO + 2) | ||
23 | |||
24 | #ifndef DE600_IRQ | ||
25 | #define DE600_IRQ 7 | ||
26 | #endif | ||
27 | /* | ||
28 | * It really should look like this, and autoprobing as well... | ||
29 | * | ||
30 | #define DATA_PORT (dev->base_addr + 0) | ||
31 | #define STATUS_PORT (dev->base_addr + 1) | ||
32 | #define COMMAND_PORT (dev->base_addr + 2) | ||
33 | #define DE600_IRQ dev->irq | ||
34 | */ | ||
35 | |||
36 | /* | ||
37 | * D-Link COMMAND_PORT commands | ||
38 | */ | ||
39 | #define SELECT_NIC 0x04 /* select Network Interface Card */ | ||
40 | #define SELECT_PRN 0x1c /* select Printer */ | ||
41 | #define NML_PRN 0xec /* normal Printer situation */ | ||
42 | #define IRQEN 0x10 /* enable IRQ line */ | ||
43 | |||
44 | /* | ||
45 | * D-Link STATUS_PORT | ||
46 | */ | ||
47 | #define RX_BUSY 0x80 | ||
48 | #define RX_GOOD 0x40 | ||
49 | #define TX_FAILED16 0x10 | ||
50 | #define TX_BUSY 0x08 | ||
51 | |||
52 | /* | ||
53 | * D-Link DATA_PORT commands | ||
54 | * command in low 4 bits | ||
55 | * data in high 4 bits | ||
56 | * select current data nibble with HI_NIBBLE bit | ||
57 | */ | ||
58 | #define WRITE_DATA 0x00 /* write memory */ | ||
59 | #define READ_DATA 0x01 /* read memory */ | ||
60 | #define STATUS 0x02 /* read status register */ | ||
61 | #define COMMAND 0x03 /* write command register (see COMMAND below) */ | ||
62 | #define NULL_COMMAND 0x04 /* null command */ | ||
63 | #define RX_LEN 0x05 /* read received packet length */ | ||
64 | #define TX_ADDR 0x06 /* set adapter transmit memory address */ | ||
65 | #define RW_ADDR 0x07 /* set adapter read/write memory address */ | ||
66 | #define HI_NIBBLE 0x08 /* read/write the high nibble of data, | ||
67 | or-ed with rest of command */ | ||
68 | |||
69 | /* | ||
70 | * command register, accessed through DATA_PORT with low bits = COMMAND | ||
71 | */ | ||
72 | #define RX_ALL 0x01 /* PROMISCUOUS */ | ||
73 | #define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */ | ||
74 | #define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */ | ||
75 | |||
76 | #define TX_ENABLE 0x04 /* bit 2 */ | ||
77 | #define RX_ENABLE 0x08 /* bit 3 */ | ||
78 | |||
79 | #define RESET 0x80 /* set bit 7 high */ | ||
80 | #define STOP_RESET 0x00 /* set bit 7 low */ | ||
81 | |||
82 | /* | ||
83 | * data to command register | ||
84 | * (high 4 bits in write to DATA_PORT) | ||
85 | */ | ||
86 | #define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */ | ||
87 | #define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */ | ||
88 | #define FLIP_IRQ 0x40 /* bit 6 */ | ||
89 | |||
90 | /* | ||
91 | * D-Link adapter internal memory: | ||
92 | * | ||
93 | * 0-2K 1:st transmit page (send from pointer up to 2K) | ||
94 | * 2-4K 2:nd transmit page (send from pointer up to 4K) | ||
95 | * | ||
96 | * 4-6K 1:st receive page (data from 4K upwards) | ||
97 | * 6-8K 2:nd receive page (data from 6K upwards) | ||
98 | * | ||
99 | * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address) | ||
100 | */ | ||
101 | #define MEM_2K 0x0800 /* 2048 */ | ||
102 | #define MEM_4K 0x1000 /* 4096 */ | ||
103 | #define MEM_6K 0x1800 /* 6144 */ | ||
104 | #define NODE_ADDRESS 0x2000 /* 8192 */ | ||
105 | |||
106 | #define RUNT 60 /* Too small Ethernet packet */ | ||
107 | |||
108 | /************************************************** | ||
109 | * * | ||
110 | * End of definition * | ||
111 | * * | ||
112 | **************************************************/ | ||
113 | |||
114 | /* | ||
115 | * Index to functions, as function prototypes. | ||
116 | */ | ||
117 | /* Routines used internally. (See "convenience macros") */ | ||
118 | static u8 de600_read_status(struct net_device *dev); | ||
119 | static u8 de600_read_byte(unsigned char type, struct net_device *dev); | ||
120 | |||
121 | /* Put in the device structure. */ | ||
122 | static int de600_open(struct net_device *dev); | ||
123 | static int de600_close(struct net_device *dev); | ||
124 | static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
125 | |||
126 | /* Dispatch from interrupts. */ | ||
127 | static irqreturn_t de600_interrupt(int irq, void *dev_id); | ||
128 | static int de600_tx_intr(struct net_device *dev, int irq_status); | ||
129 | static void de600_rx_intr(struct net_device *dev); | ||
130 | |||
131 | /* Initialization */ | ||
132 | static void trigger_interrupt(struct net_device *dev); | ||
133 | static int adapter_init(struct net_device *dev); | ||
134 | |||
/*
 * Convenience macros/functions for D-Link adapter
 */

/*
 * Select the NIC or printer side of the parallel port.  Wrapped in
 * do { } while (0) so each macro expands safely as a single statement
 * (e.g. inside an unbraced if/else); the delay settles the switch.
 */
#define select_prn() \
	do { outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN; } while (0)
#define select_nic() \
	do { outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN; } while (0)

/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
/* Write one data byte to adapter memory: low nibble, then high nibble. */
#define de600_put_byte(data) ( \
	outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
	outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))

/*
 * The first two outb_p()'s below could perhaps be deleted if there
 * would be more delay in the last two. Not certain about it yet...
 * (rx_page carries the receive-page selector bits that accompany
 * every command; 'cmd' is parenthesized against operator surprises.)
 */
#define de600_put_command(cmd) ( \
	outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
	outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
	outb_p(((rx_page | (cmd)) << 4) | COMMAND , DATA_PORT), \
	outb_p(((rx_page | (cmd)) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))

/* Send a 16-bit adapter memory address, one nibble per write. */
#define de600_setup_address(addr, type) ( \
	outb_p((((addr) << 4) & 0xf0) | (type) , DATA_PORT), \
	outb_p(( (addr) & 0xf0) | (type) | HI_NIBBLE, DATA_PORT), \
	outb_p((((addr) >> 4) & 0xf0) | (type) , DATA_PORT), \
	outb_p((((addr) >> 8) & 0xf0) | (type) | HI_NIBBLE, DATA_PORT))

#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT) ? (MEM_6K) : (MEM_4K))

/* Flip bit, only 2 pages */
#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)

/* Top of transmit page 'a'; callers subtract the packet length. */
#define tx_page_adr(a) (((a) + 1) * MEM_2K)
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c new file mode 100644 index 000000000000..1c51a7576119 --- /dev/null +++ b/drivers/net/ethernet/dlink/de620.c | |||
@@ -0,0 +1,988 @@ | |||
1 | /* | ||
2 | * de620.c $Revision: 1.40 $ BETA | ||
3 | * | ||
4 | * | ||
5 | * Linux driver for the D-Link DE-620 Ethernet pocket adapter. | ||
6 | * | ||
7 | * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se> | ||
8 | * | ||
9 | * Based on adapter information gathered from DOS packetdriver | ||
10 | * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.) | ||
11 | * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992 | ||
12 | * Copyright, 1988, Russell Nelson, Crynwr Software | ||
13 | * | ||
14 | * Adapted to the sample network driver core for linux, | ||
15 | * written by: Donald Becker <becker@super.org> | ||
16 | * (Now at <becker@scyld.com>) | ||
17 | * | ||
18 | * Valuable assistance from: | ||
19 | * J. Joshua Kopper <kopper@rtsg.mot.com> | ||
20 | * Olav Kvittem <Olav.Kvittem@uninett.no> | ||
21 | * Germano Caronni <caronni@nessie.cs.id.ethz.ch> | ||
22 | * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au> | ||
23 | * | ||
24 | *****************************************************************************/ | ||
25 | /* | ||
26 | * This program is free software; you can redistribute it and/or modify | ||
27 | * it under the terms of the GNU General Public License as published by | ||
28 | * the Free Software Foundation; either version 2, or (at your option) | ||
29 | * any later version. | ||
30 | * | ||
31 | * This program is distributed in the hope that it will be useful, | ||
32 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
33 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
34 | * GNU General Public License for more details. | ||
35 | * | ||
36 | * You should have received a copy of the GNU General Public License | ||
37 | * along with this program; if not, write to the Free Software | ||
38 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
39 | * | ||
40 | *****************************************************************************/ | ||
41 | static const char version[] = | ||
42 | "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n"; | ||
43 | |||
44 | /*********************************************************************** | ||
45 | * | ||
46 | * "Tuning" section. | ||
47 | * | ||
48 | * Compile-time options: (see below for descriptions) | ||
49 | * -DDE620_IO=0x378 (lpt1) | ||
50 | * -DDE620_IRQ=7 (lpt1) | ||
51 | * -DSHUTDOWN_WHEN_LOST | ||
52 | * -DCOUNT_LOOPS | ||
53 | * -DLOWSPEED | ||
54 | * -DREAD_DELAY | ||
55 | * -DWRITE_DELAY | ||
56 | */ | ||
57 | |||
58 | /* | ||
59 | * This driver assumes that the printer port is a "normal", | ||
60 | * dumb, uni-directional port! | ||
61 | * If your port is "fancy" in any way, please try to set it to "normal" | ||
62 | * with your BIOS setup. I have no access to machines with bi-directional | ||
63 | * ports, so I can't test such a driver :-( | ||
64 | * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...) | ||
65 | * | ||
66 | * There are some clones of DE620 out there, with different names. | ||
67 | * If the current driver does not recognize a clone, try to change | ||
68 | * the following #define to: | ||
69 | * | ||
70 | * #define DE620_CLONE 1 | ||
71 | */ | ||
72 | #define DE620_CLONE 0 | ||
73 | |||
74 | /* | ||
75 | * If the adapter has problems with high speeds, enable this #define | ||
76 | * otherwise full printerport speed will be attempted. | ||
77 | * | ||
78 | * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED | ||
79 | * | ||
80 | #define LOWSPEED | ||
81 | */ | ||
82 | |||
83 | #ifndef READ_DELAY | ||
84 | #define READ_DELAY 100 /* adapter internal read delay in 100ns units */ | ||
85 | #endif | ||
86 | |||
87 | #ifndef WRITE_DELAY | ||
88 | #define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */ | ||
89 | #endif | ||
90 | |||
91 | /* | ||
92 | * Enable this #define if you want the adapter to do a "ifconfig down" on | ||
93 | * itself when we have detected that something is possibly wrong with it. | ||
94 | * The default behaviour is to retry with "adapter_init()" until success. | ||
95 | * This should be used for debugging purposes only. | ||
96 | * | ||
97 | #define SHUTDOWN_WHEN_LOST | ||
98 | */ | ||
99 | |||
100 | #ifdef LOWSPEED | ||
101 | /* | ||
102 | * Enable this #define if you want to see debugging output that show how long | ||
103 | * we have to wait before the DE-620 is ready for the next read/write/command. | ||
104 | * | ||
105 | #define COUNT_LOOPS | ||
106 | */ | ||
107 | #endif | ||
108 | |||
109 | #include <linux/module.h> | ||
110 | #include <linux/kernel.h> | ||
111 | #include <linux/types.h> | ||
112 | #include <linux/fcntl.h> | ||
113 | #include <linux/string.h> | ||
114 | #include <linux/interrupt.h> | ||
115 | #include <linux/ioport.h> | ||
116 | #include <linux/in.h> | ||
117 | #include <linux/errno.h> | ||
118 | #include <linux/init.h> | ||
119 | #include <linux/inet.h> | ||
120 | #include <linux/netdevice.h> | ||
121 | #include <linux/etherdevice.h> | ||
122 | #include <linux/skbuff.h> | ||
123 | |||
124 | #include <asm/io.h> | ||
125 | #include <asm/system.h> | ||
126 | |||
127 | /* Constant definitions for the DE-620 registers, commands and bits */ | ||
128 | #include "de620.h" | ||
129 | |||
/* All transfers are nibble-wide; "byte" is just a local shorthand. */
typedef unsigned char byte;

/*******************************************************
 *                                                     *
 * Definition of D-Link DE-620 Ethernet Pocket adapter *
 * See also "de620.h"                                  *
 *                                                     *
 *******************************************************/
#ifndef DE620_IO /* Compile-time configurable */
#define DE620_IO 0x378
#endif

#ifndef DE620_IRQ /* Compile-time configurable */
#define DE620_IRQ 7
#endif

/* Parallel-port register layout relative to the base address. */
#define DATA_PORT	(dev->base_addr)
#define STATUS_PORT	(dev->base_addr + 1)
#define COMMAND_PORT	(dev->base_addr + 2)

#define RUNT 60		/* Too small Ethernet packet */
#define GIANT 1514	/* largest legal size packet, no fcs */
/*
 * Force media with insmod:
 *	insmod de620.o bnc=1
 * or
 *	insmod de620.o utp=1
 *
 * Force io and/or irq with insmod:
 *	insmod de620.o io=0x378 irq=7
 *
 * Make a clone skip the Ethernet-address range check:
 *	insmod de620.o clone=1
 */
static int bnc;			/* nonzero = force BNC media */
static int utp;			/* nonzero = force UTP media */
static int io = DE620_IO;	/* parallel-port base address */
static int irq = DE620_IRQ;
static int clone = DE620_CLONE;	/* nonzero = skip D-Link MAC-prefix check */

/* Serializes all hardware access (xmit path vs. interrupt handler). */
static spinlock_t de620_lock;

module_param(bnc, int, 0);
module_param(utp, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(clone, int, 0);
MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
MODULE_PARM_DESC(io, "DE-620 I/O base address,required");
MODULE_PARM_DESC(irq, "DE-620 IRQ number,required");
MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
183 | |||
184 | /*********************************************** | ||
185 | * * | ||
186 | * Index to functions, as function prototypes. * | ||
187 | * * | ||
188 | ***********************************************/ | ||
189 | |||
190 | /* | ||
191 | * Routines used internally. (See also "convenience macros.. below") | ||
192 | */ | ||
193 | |||
194 | /* Put in the device structure. */ | ||
195 | static int de620_open(struct net_device *); | ||
196 | static int de620_close(struct net_device *); | ||
197 | static void de620_set_multicast_list(struct net_device *); | ||
198 | static int de620_start_xmit(struct sk_buff *, struct net_device *); | ||
199 | |||
200 | /* Dispatch from interrupts. */ | ||
201 | static irqreturn_t de620_interrupt(int, void *); | ||
202 | static int de620_rx_intr(struct net_device *); | ||
203 | |||
204 | /* Initialization */ | ||
205 | static int adapter_init(struct net_device *); | ||
206 | static int read_eeprom(struct net_device *); | ||
207 | |||
208 | |||
/*
 * D-Link driver variables:
 *
 * Value macros are fully parenthesized so they expand safely inside
 * larger expressions (TCR_DEF is used as "TCR_DEF & ~RXPBM" below).
 */
#define SCR_DEF (NIBBLEMODE | INTON | SLEEP | AUTOTX)
#define TCR_DEF (RXPB) /* not used: | TXSUCINT | T16INT */
#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
#define DEF_NIC_CMD (IRQEN | ICEN | DS1)
/* Shadow of the adapter command register; all access helpers key off it. */
static volatile byte NIC_Cmd;
/* Adapter RAM page where the next received packet is expected. */
static volatile byte next_rx_page;
static byte first_rx_page;	/* first page of the receive ring */
static byte last_rx_page;	/* last page of the receive ring */
static byte EIPRegister;	/* cached media/control (W_EIP) value */

/* Adapter identity as read from the on-board EEPROM (see read_eeprom()). */
static struct nic {
	byte NodeID[6];		/* Ethernet station address */
	byte RAM_Size;		/* in 256-byte pages; 0 means 64k */
	byte Model;
	byte Media;		/* 1 = BNC, otherwise UTP */
	byte SCR;
} nic_data;
230 | |||
/**********************************************************
 *                                                        *
 * Convenience macros/functions for D-Link DE-620 adapter *
 *                                                        *
 **********************************************************/
/* Both TX-buffer-busy flags, as read from the status port. */
#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
/*
 * Toggle both data strobes and latch the new command byte.
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement in unbraced if/else bodies (kernel macro convention).
 */
#define de620_flip_ds(dd) do { NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT); } while (0)

/* Check for ready-status, and return a nibble (high 4 bits) for data input */
#ifdef COUNT_LOOPS
static int tot_cnt;
#endif
243 | static inline byte | ||
244 | de620_ready(struct net_device *dev) | ||
245 | { | ||
246 | byte value; | ||
247 | register short int cnt = 0; | ||
248 | |||
249 | while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000)) | ||
250 | ++cnt; | ||
251 | |||
252 | #ifdef COUNT_LOOPS | ||
253 | tot_cnt += cnt; | ||
254 | #endif | ||
255 | return value & 0xf0; /* nibble */ | ||
256 | } | ||
257 | |||
258 | static inline void | ||
259 | de620_send_command(struct net_device *dev, byte cmd) | ||
260 | { | ||
261 | de620_ready(dev); | ||
262 | if (cmd == W_DUMMY) | ||
263 | outb(NIC_Cmd, COMMAND_PORT); | ||
264 | |||
265 | outb(cmd, DATA_PORT); | ||
266 | |||
267 | outb(NIC_Cmd ^ CS0, COMMAND_PORT); | ||
268 | de620_ready(dev); | ||
269 | outb(NIC_Cmd, COMMAND_PORT); | ||
270 | } | ||
271 | |||
272 | static inline void | ||
273 | de620_put_byte(struct net_device *dev, byte value) | ||
274 | { | ||
275 | /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */ | ||
276 | de620_ready(dev); | ||
277 | outb(value, DATA_PORT); | ||
278 | de620_flip_ds(dev); | ||
279 | } | ||
280 | |||
281 | static inline byte | ||
282 | de620_read_byte(struct net_device *dev) | ||
283 | { | ||
284 | byte value; | ||
285 | |||
286 | /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */ | ||
287 | value = de620_ready(dev); /* High nibble */ | ||
288 | de620_flip_ds(dev); | ||
289 | value |= de620_ready(dev) >> 4; /* Low nibble */ | ||
290 | return value; | ||
291 | } | ||
292 | |||
293 | static inline void | ||
294 | de620_write_block(struct net_device *dev, byte *buffer, int count, int pad) | ||
295 | { | ||
296 | #ifndef LOWSPEED | ||
297 | byte uflip = NIC_Cmd ^ (DS0 | DS1); | ||
298 | byte dflip = NIC_Cmd; | ||
299 | #else /* LOWSPEED */ | ||
300 | #ifdef COUNT_LOOPS | ||
301 | int bytes = count; | ||
302 | #endif /* COUNT_LOOPS */ | ||
303 | #endif /* LOWSPEED */ | ||
304 | |||
305 | #ifdef LOWSPEED | ||
306 | #ifdef COUNT_LOOPS | ||
307 | tot_cnt = 0; | ||
308 | #endif /* COUNT_LOOPS */ | ||
309 | /* No further optimization useful, the limit is in the adapter. */ | ||
310 | for ( ; count > 0; --count, ++buffer) { | ||
311 | de620_put_byte(dev,*buffer); | ||
312 | } | ||
313 | for ( count = pad ; count > 0; --count, ++buffer) { | ||
314 | de620_put_byte(dev, 0); | ||
315 | } | ||
316 | de620_send_command(dev,W_DUMMY); | ||
317 | #ifdef COUNT_LOOPS | ||
318 | /* trial debug output: loops per byte in de620_ready() */ | ||
319 | printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1))); | ||
320 | #endif /* COUNT_LOOPS */ | ||
321 | #else /* not LOWSPEED */ | ||
322 | for ( ; count > 0; count -=2) { | ||
323 | outb(*buffer++, DATA_PORT); | ||
324 | outb(uflip, COMMAND_PORT); | ||
325 | outb(*buffer++, DATA_PORT); | ||
326 | outb(dflip, COMMAND_PORT); | ||
327 | } | ||
328 | de620_send_command(dev,W_DUMMY); | ||
329 | #endif /* LOWSPEED */ | ||
330 | } | ||
331 | |||
332 | static inline void | ||
333 | de620_read_block(struct net_device *dev, byte *data, int count) | ||
334 | { | ||
335 | #ifndef LOWSPEED | ||
336 | byte value; | ||
337 | byte uflip = NIC_Cmd ^ (DS0 | DS1); | ||
338 | byte dflip = NIC_Cmd; | ||
339 | #else /* LOWSPEED */ | ||
340 | #ifdef COUNT_LOOPS | ||
341 | int bytes = count; | ||
342 | |||
343 | tot_cnt = 0; | ||
344 | #endif /* COUNT_LOOPS */ | ||
345 | #endif /* LOWSPEED */ | ||
346 | |||
347 | #ifdef LOWSPEED | ||
348 | /* No further optimization useful, the limit is in the adapter. */ | ||
349 | while (count-- > 0) { | ||
350 | *data++ = de620_read_byte(dev); | ||
351 | de620_flip_ds(dev); | ||
352 | } | ||
353 | #ifdef COUNT_LOOPS | ||
354 | /* trial debug output: loops per byte in de620_ready() */ | ||
355 | printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1))); | ||
356 | #endif /* COUNT_LOOPS */ | ||
357 | #else /* not LOWSPEED */ | ||
358 | while (count-- > 0) { | ||
359 | value = inb(STATUS_PORT) & 0xf0; /* High nibble */ | ||
360 | outb(uflip, COMMAND_PORT); | ||
361 | *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */ | ||
362 | outb(dflip , COMMAND_PORT); | ||
363 | } | ||
364 | #endif /* LOWSPEED */ | ||
365 | } | ||
366 | |||
367 | static inline void | ||
368 | de620_set_delay(struct net_device *dev) | ||
369 | { | ||
370 | de620_ready(dev); | ||
371 | outb(W_DFR, DATA_PORT); | ||
372 | outb(NIC_Cmd ^ CS0, COMMAND_PORT); | ||
373 | |||
374 | de620_ready(dev); | ||
375 | #ifdef LOWSPEED | ||
376 | outb(WRITE_DELAY, DATA_PORT); | ||
377 | #else | ||
378 | outb(0, DATA_PORT); | ||
379 | #endif | ||
380 | de620_flip_ds(dev); | ||
381 | |||
382 | de620_ready(dev); | ||
383 | #ifdef LOWSPEED | ||
384 | outb(READ_DELAY, DATA_PORT); | ||
385 | #else | ||
386 | outb(0, DATA_PORT); | ||
387 | #endif | ||
388 | de620_flip_ds(dev); | ||
389 | } | ||
390 | |||
391 | static inline void | ||
392 | de620_set_register(struct net_device *dev, byte reg, byte value) | ||
393 | { | ||
394 | de620_ready(dev); | ||
395 | outb(reg, DATA_PORT); | ||
396 | outb(NIC_Cmd ^ CS0, COMMAND_PORT); | ||
397 | |||
398 | de620_put_byte(dev, value); | ||
399 | } | ||
400 | |||
401 | static inline byte | ||
402 | de620_get_register(struct net_device *dev, byte reg) | ||
403 | { | ||
404 | byte value; | ||
405 | |||
406 | de620_send_command(dev,reg); | ||
407 | value = de620_read_byte(dev); | ||
408 | de620_send_command(dev,W_DUMMY); | ||
409 | |||
410 | return value; | ||
411 | } | ||
412 | |||
413 | /********************************************************************* | ||
414 | * | ||
415 | * Open/initialize the board. | ||
416 | * | ||
417 | * This routine should set everything up anew at each open, even | ||
418 | * registers that "should" only need to be set once at boot, so that | ||
419 | * there is a non-reboot way to recover if something goes wrong. | ||
420 | * | ||
421 | */ | ||
422 | static int de620_open(struct net_device *dev) | ||
423 | { | ||
424 | int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev); | ||
425 | if (ret) { | ||
426 | printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); | ||
427 | return ret; | ||
428 | } | ||
429 | |||
430 | if (adapter_init(dev)) { | ||
431 | ret = -EIO; | ||
432 | goto out_free_irq; | ||
433 | } | ||
434 | |||
435 | netif_start_queue(dev); | ||
436 | return 0; | ||
437 | |||
438 | out_free_irq: | ||
439 | free_irq(dev->irq, dev); | ||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | /************************************************ | ||
444 | * | ||
445 | * The inverse routine to de620_open(). | ||
446 | * | ||
447 | */ | ||
448 | |||
449 | static int de620_close(struct net_device *dev) | ||
450 | { | ||
451 | netif_stop_queue(dev); | ||
452 | /* disable recv */ | ||
453 | de620_set_register(dev, W_TCR, RXOFF); | ||
454 | free_irq(dev->irq, dev); | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | /********************************************* | ||
459 | * | ||
460 | * Set or clear the multicast filter for this adaptor. | ||
461 | * (no real multicast implemented for the DE-620, but she can be promiscuous...) | ||
462 | * | ||
463 | */ | ||
464 | |||
465 | static void de620_set_multicast_list(struct net_device *dev) | ||
466 | { | ||
467 | if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) | ||
468 | { /* Enable promiscuous mode */ | ||
469 | de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); | ||
470 | } | ||
471 | else | ||
472 | { /* Disable promiscuous mode, use normal mode */ | ||
473 | de620_set_register(dev, W_TCR, TCR_DEF); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | /******************************************************* | ||
478 | * | ||
479 | * Handle timeouts on transmit | ||
480 | */ | ||
481 | |||
482 | static void de620_timeout(struct net_device *dev) | ||
483 | { | ||
484 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); | ||
485 | /* Restart the adapter. */ | ||
486 | if (!adapter_init(dev)) /* maybe close it */ | ||
487 | netif_wake_queue(dev); | ||
488 | } | ||
489 | |||
490 | /******************************************************* | ||
491 | * | ||
492 | * Copy a buffer to the adapter transmit page memory. | ||
493 | * Start sending. | ||
494 | */ | ||
495 | static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
496 | { | ||
497 | unsigned long flags; | ||
498 | int len; | ||
499 | byte *buffer = skb->data; | ||
500 | byte using_txbuf; | ||
501 | |||
502 | using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */ | ||
503 | |||
504 | netif_stop_queue(dev); | ||
505 | |||
506 | |||
507 | if ((len = skb->len) < RUNT) | ||
508 | len = RUNT; | ||
509 | if (len & 1) /* send an even number of bytes */ | ||
510 | ++len; | ||
511 | |||
512 | /* Start real output */ | ||
513 | |||
514 | spin_lock_irqsave(&de620_lock, flags); | ||
515 | pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n", | ||
516 | (int)skb->len, using_txbuf); | ||
517 | |||
518 | /* select a free tx buffer. if there is one... */ | ||
519 | switch (using_txbuf) { | ||
520 | default: /* both are free: use TXBF0 */ | ||
521 | case TXBF1: /* use TXBF0 */ | ||
522 | de620_send_command(dev,W_CR | RW0); | ||
523 | using_txbuf |= TXBF0; | ||
524 | break; | ||
525 | |||
526 | case TXBF0: /* use TXBF1 */ | ||
527 | de620_send_command(dev,W_CR | RW1); | ||
528 | using_txbuf |= TXBF1; | ||
529 | break; | ||
530 | |||
531 | case (TXBF0 | TXBF1): /* NONE!!! */ | ||
532 | printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name); | ||
533 | spin_unlock_irqrestore(&de620_lock, flags); | ||
534 | return NETDEV_TX_BUSY; | ||
535 | } | ||
536 | de620_write_block(dev, buffer, skb->len, len-skb->len); | ||
537 | |||
538 | if(!(using_txbuf == (TXBF0 | TXBF1))) | ||
539 | netif_wake_queue(dev); | ||
540 | |||
541 | dev->stats.tx_packets++; | ||
542 | spin_unlock_irqrestore(&de620_lock, flags); | ||
543 | dev_kfree_skb (skb); | ||
544 | return NETDEV_TX_OK; | ||
545 | } | ||
546 | |||
547 | /***************************************************** | ||
548 | * | ||
549 | * Handle the network interface interrupts. | ||
550 | * | ||
551 | */ | ||
552 | static irqreturn_t | ||
553 | de620_interrupt(int irq_in, void *dev_id) | ||
554 | { | ||
555 | struct net_device *dev = dev_id; | ||
556 | byte irq_status; | ||
557 | int bogus_count = 0; | ||
558 | int again = 0; | ||
559 | |||
560 | spin_lock(&de620_lock); | ||
561 | |||
562 | /* Read the status register (_not_ the status port) */ | ||
563 | irq_status = de620_get_register(dev, R_STS); | ||
564 | |||
565 | pr_debug("de620_interrupt (%2.2X)\n", irq_status); | ||
566 | |||
567 | if (irq_status & RXGOOD) { | ||
568 | do { | ||
569 | again = de620_rx_intr(dev); | ||
570 | pr_debug("again=%d\n", again); | ||
571 | } | ||
572 | while (again && (++bogus_count < 100)); | ||
573 | } | ||
574 | |||
575 | if(de620_tx_buffs(dev) != (TXBF0 | TXBF1)) | ||
576 | netif_wake_queue(dev); | ||
577 | |||
578 | spin_unlock(&de620_lock); | ||
579 | return IRQ_HANDLED; | ||
580 | } | ||
581 | |||
/**************************************
 *
 * Get a packet from the adapter
 *
 * Send it "upstairs"
 *
 * Returns nonzero if more packets appear to be waiting in the ring.
 */
static int de620_rx_intr(struct net_device *dev)
{
	/* Per-packet header stored by the adapter at the start of each page. */
	struct header_buf {
		byte status;
		byte Rx_NextPage;		/* page after this packet */
		unsigned short Rx_ByteCount;	/* length incl. this 4-byte header */
	} header_buf;
	struct sk_buff *skb;
	int size;
	byte *buffer;
	byte pagelink;
	byte curr_page;

	pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);

	/* Tell the adapter that we are going to read data, and from where */
	de620_send_command(dev, W_CR | RRN);
	de620_set_register(dev, W_RSA1, next_rx_page);
	de620_set_register(dev, W_RSA0, 0);

	/* Deep breath, and away we goooooo */
	de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
	pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
		header_buf.status, header_buf.Rx_NextPage,
		header_buf.Rx_ByteCount);

	/* Plausible page header? */
	pagelink = header_buf.Rx_NextPage;
	if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
		/* Ouch... Forget it! Skip all and start afresh... */
		printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
		/* You win some, you lose some. And sometimes plenty... */
		adapter_init(dev);
		netif_wake_queue(dev);
		dev->stats.rx_over_errors++;
		return 0;
	}

	/* OK, this look good, so far. Let's see if it's consistent... */
	/* Let's compute the start of the next packet, based on where we are */
	/* (4 - 1 + 0x100) rounds header+data up to whole 256-byte pages. */
	pagelink = next_rx_page +
		((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);

	/* Are we going to wrap around the page counter? */
	if (pagelink > last_rx_page)
		pagelink -= (last_rx_page - first_rx_page + 1);

	/* Is the _computed_ next page number equal to what the adapter says? */
	if (pagelink != header_buf.Rx_NextPage) {
		/* Naah, we'll skip this packet. Probably bogus data as well */
		printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
		next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
		de620_send_command(dev, W_DUMMY);
		de620_set_register(dev, W_NPRF, next_rx_page);
		dev->stats.rx_over_errors++;
		return 0;
	}
	next_rx_page = pagelink;

	size = header_buf.Rx_ByteCount - 4;	/* strip the page header */
	if ((size < RUNT) || (GIANT < size)) {
		printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
	}
	else { /* Good packet? */
		skb = dev_alloc_skb(size+2);
		if (skb == NULL) { /* Yeah, but no place to put it... */
			printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
			dev->stats.rx_dropped++;
		}
		else { /* Yep! Go get it! */
			skb_reserve(skb,2); /* Align */
			/* skb->data points to the start of sk_buff data area */
			buffer = skb_put(skb,size);
			/* copy the packet into the buffer */
			de620_read_block(dev, buffer, size);
			pr_debug("Read %d bytes\n", size);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb); /* deliver it "upstairs" */
			/* count all receives */
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += size;
		}
	}

	/* Let's peek ahead to see if we have read the last current packet */
	/* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
	curr_page = de620_get_register(dev, R_CPR);
	de620_set_register(dev, W_NPRF, next_rx_page);
	pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);

	return next_rx_page != curr_page; /* That was slightly tricky... */
}
681 | |||
/*********************************************
 *
 * Reset the adapter to a known state
 *
 * Returns 0 on success, 1 if the adapter fails the final sanity check.
 */
static int adapter_init(struct net_device *dev)
{
	int i;
	static int was_down;	/* remembers an earlier failure, for logging */

	/* Derive the media/control (EIP) value from the EEPROM data... */
	if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
		EIPRegister = NCTL0;
		if (nic_data.Media != 1)
			EIPRegister |= NIS0;	/* not BNC */
	}
	else if (nic_data.Model == 2) { /* UTP */
		EIPRegister = NCTL0 | NIS0;
	}

	/* ...but let the utp=/bnc= module parameters override it. */
	if (utp)
		EIPRegister = NCTL0 | NIS0;
	if (bnc)
		EIPRegister = NCTL0;

	de620_send_command(dev, W_CR | RNOP | CLEAR);
	de620_send_command(dev, W_CR | RNOP);

	de620_set_register(dev, W_SCR, SCR_DEF);
	/* disable recv to wait init */
	de620_set_register(dev, W_TCR, RXOFF);

	/* Set the node ID in the adapter */
	for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
		de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
	}

	de620_set_register(dev, W_EIP, EIPRegister);

	/* Lay out the receive ring; pages below DE620_RX_START_PAGE are tx. */
	next_rx_page = first_rx_page = DE620_RX_START_PAGE;
	if (nic_data.RAM_Size)
		last_rx_page = nic_data.RAM_Size - 1;
	else /* 64k RAM */
		last_rx_page = 255;

	de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
	de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
	de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
	de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
	de620_send_command(dev, W_DUMMY);
	de620_set_delay(dev);

	/* Final sanity check: Anybody out there? */
	/* Let's hope some bits from the statusregister make a good check */
#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
	/* success:   X     0      0       X     0       0       X    X  */
	/* ignore:   EEDI                RXGOOD                 COLS LNKS*/

	if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
		printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
#ifdef SHUTDOWN_WHEN_LOST
			" and do a new ifconfig"
#endif
			"! (%02x)\n", dev->name, i);
#ifdef SHUTDOWN_WHEN_LOST
		/* Goodbye, cruel world... */
		dev->flags &= ~IFF_UP;
		de620_close(dev);
#endif
		was_down = 1;
		return 1; /* failed */
	}
	if (was_down) {
		printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
		was_down = 0;
	}

	/* All OK, go ahead... */
	de620_set_register(dev, W_TCR, TCR_DEF);

	return 0; /* all ok */
}
764 | |||
/* Standard net_device callbacks; address/MTU handling uses the
 * generic Ethernet helpers. */
static const struct net_device_ops de620_netdev_ops = {
	.ndo_open 		= de620_open,
	.ndo_stop 		= de620_close,
	.ndo_start_xmit 	= de620_start_xmit,
	.ndo_tx_timeout 	= de620_timeout,
	.ndo_set_multicast_list = de620_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
775 | |||
/******************************************************************************
 *
 * Only start-up code below
 *
 */
/****************************************
 *
 * Check if there is a DE-620 connected
 *
 * Returns the registered net_device on success, ERR_PTR(-errno) otherwise.
 */
struct net_device * __init de620_probe(int unit)
{
	byte checkbyte = 0xa5;	/* scratch pattern for the presence test */
	struct net_device *dev;
	int err = -ENOMEM;
	int i;

	dev = alloc_etherdev(0);
	if (!dev)
		goto out;

	spin_lock_init(&de620_lock);

	/*
	 * This is where the base_addr and irq gets set.
	 * Tunable at compile-time and insmod-time
	 */
	dev->base_addr = io;
	dev->irq = irq;

	/* allow overriding parameters on command line */
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	pr_debug("%s", version);

	printk(KERN_INFO "D-Link DE-620 pocket adapter");

	if (!request_region(dev->base_addr, 3, "de620")) {
		printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
		err = -EBUSY;
		goto out1;
	}

	/* Initially, configure basic nibble mode, so we can read the EEPROM */
	NIC_Cmd = DEF_NIC_CMD;
	de620_set_register(dev, W_EIP, EIPRegister);

	/* Anybody out there? Write a scratch byte and see if it reads back. */
	de620_set_register(dev, W_CPR, checkbyte);
	checkbyte = de620_get_register(dev, R_CPR);

	if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
		printk(" not identified in the printer port\n");
		err = -ENODEV;
		goto out2;
	}

	/* else, got it! Copy the station address read from the EEPROM. */
	dev->dev_addr[0] = nic_data.NodeID[0];
	for (i = 1; i < ETH_ALEN; i++) {
		dev->dev_addr[i] = nic_data.NodeID[i];
		dev->broadcast[i] = 0xff;
	}

	printk(", Ethernet Address: %pM", dev->dev_addr);

	printk(" (%dk RAM,",
		(nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);

	if (nic_data.Media == 1)
		printk(" BNC)\n");
	else
		printk(" UTP)\n");

	dev->netdev_ops = &de620_netdev_ops;
	dev->watchdog_timeo = HZ*2;

	/* base_addr and irq are already set, see above! */

	/* dump eeprom */
	pr_debug("\nEEPROM contents:\n"
		"RAM_Size = 0x%02X\n"
		"NodeID = %pM\n"
		"Model = %d\n"
		"Media = %d\n"
		"SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
		 nic_data.Model, nic_data.Media, nic_data.SCR);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;

out2:
	release_region(dev->base_addr, 3);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
878 | |||
/**********************************
 *
 * Read info from on-board EEPROM
 *
 * Note: Bitwise serial I/O to/from the EEPROM vi the status _register_!
 */
/* Drive the EEPROM control lines through the low bits of the EIP register. */
#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);

/* Bit-bang one 16-bit word out of the EEPROM at address 'from'. */
static unsigned short __init ReadAWord(struct net_device *dev, int from)
{
	unsigned short data;
	int nbits;

	/* cs   [__~~] SET SEND STATE */
	/* di   [____] */
	/* sck  [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);

	/* Send the 9-bit address from where we want to read the 16-bit word */
	for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
		if (from & 0x0100) { /* bit set? */
			/* cs   [~~~~] SEND 1 */
			/* di   [~~~~] */
			/* sck  [_~~_] */
			sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
		}
		else {
			/* cs   [~~~~] SEND 0 */
			/* di   [____] */
			/* sck  [_~~_] */
			sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		}
	}

	/* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
	for (data = 0, nbits = 16; nbits > 0; --nbits) {
		/* cs   [~~~~] SEND 0 */
		/* di   [____] */
		/* sck  [_~~_] */
		sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
	}
	/* cs   [____] RESET SEND STATE */
	/* di   [____] */
	/* sck  [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);

	return data;
}
928 | |||
/*
 * Fill nic_data from the adapter's EEPROM.
 * Returns 0 on success, -1 when the station-address prefix is not
 * D-Link's (unless clone=1 disables that check).
 */
static int __init read_eeprom(struct net_device *dev)
{
	unsigned short wrd;

	/* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
	wrd = ReadAWord(dev, 0x1aa);	/* bytes 0 + 1 of NodeID */
	if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[0] = wrd & 0xff;
	nic_data.NodeID[1] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ab);	/* bytes 2 + 3 of NodeID */
	if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[2] = wrd & 0xff;
	nic_data.NodeID[3] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ac);	/* bytes 4 + 5 of NodeID */
	nic_data.NodeID[4] = wrd & 0xff;
	nic_data.NodeID[5] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ad);	/* RAM size in pages (256 bytes). 0 = 64k */
	nic_data.RAM_Size = (wrd >> 8);

	wrd = ReadAWord(dev, 0x1ae);	/* hardware model (CT = 3) */
	nic_data.Model = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1af);	/* media (indicates BNC/UTP) */
	nic_data.Media = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1a8);	/* System Configuration Register */
	nic_data.SCR = (wrd >> 8);

	return 0; /* no errors */
}
964 | |||
/******************************************************************************
 *
 * Loadable module skeleton
 *
 */
#ifdef MODULE
static struct net_device *de620_dev;	/* the single probed device */

int __init init_module(void)
{
	de620_dev = de620_probe(-1);
	if (IS_ERR(de620_dev))
		return PTR_ERR(de620_dev);
	return 0;
}

void cleanup_module(void)
{
	/* Unwind in reverse order of de620_probe(). */
	unregister_netdev(de620_dev);
	release_region(de620_dev->base_addr, 3);
	free_netdev(de620_dev);
}
#endif /* MODULE */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h new file mode 100644 index 000000000000..e8d9a88f4cb5 --- /dev/null +++ b/drivers/net/ethernet/dlink/de620.h | |||
@@ -0,0 +1,117 @@ | |||
/*********************************************************
 *							 *
 *  Definition of D-Link DE-620 Ethernet Pocket adapter	 *
 *							 *
 *  Bit masks for the parallel-port handshake lines and	 *
 *  the on-card DIC (DL3520) command/register encodings	 *
 *  used by de620.c.					 *
 *********************************************************/

/* DE-620's CMD port Command */
#define CS0		0x08	/* 1->0 command strobe */
#define ICEN		0x04	/* 0=enable DL3520 host interface */
#define DS0		0x02	/* 1->0 data strobe 0 */
#define DS1		0x01	/* 1->0 data strobe 1 */

#define WDIR		0x20	/* general 0=read 1=write */
#define RDIR		0x00	/* (not 100% confirmed) */
#define PS2WDIR		0x00	/* ps/2 mode 1=read, 0=write */
#define PS2RDIR		0x20

#define IRQEN		0x10	/* 1 = enable printer IRQ line */
#define SELECTIN	0x08	/* 1 = select printer */
#define INITP		0x04	/* 0 = initialize printer */
#define AUTOFEED	0x02	/* 1 = printer auto form feed */
#define STROBE		0x01	/* 0->1 data strobe */

#define RESET		0x08
#define NIS0		0x20	/* 0 = BNC, 1 = UTP */
#define NCTL0		0x10

/* DE-620 DIC Command */
#define W_DUMMY		0x00	/* DIC reserved command */
#define W_CR		0x20	/* DIC write command register */
#define W_NPR		0x40	/* DIC write Next Page Register */
#define W_TBR		0x60	/* DIC write Tx Byte Count 1 reg */
#define W_RSA		0x80	/* DIC write Remote Start Addr 1 */

/* DE-620's STAT port bits 7-4 */
#define EMPTY		0x80	/* 1 = receive buffer empty */
#define INTLEVEL	0x40	/* 1 = interrupt level is high */
#define TXBF1		0x20	/* 1 = transmit buffer 1 is in use */
#define TXBF0		0x10	/* 1 = transmit buffer 0 is in use */
#define READY		0x08	/* 1 = h/w ready to accept cmd/data */

/* IDC 1 Command */
#define W_RSA1		0xa0	/* write remote start address 1 */
#define W_RSA0		0xa1	/* write remote start address 0 */
#define W_NPRF		0xa2	/* write next page register NPR15-NPR8 */
#define W_DFR		0xa3	/* write delay factor register */
#define W_CPR		0xa4	/* write current page register */
#define W_SPR		0xa5	/* write start page register */
#define W_EPR		0xa6	/* write end page register */
#define W_SCR		0xa7	/* write system configuration register */
#define W_TCR		0xa8	/* write Transceiver Configuration reg */
#define W_EIP		0xa9	/* write EEPM Interface port */
#define W_PAR0		0xaa	/* write physical address register 0 */
#define W_PAR1		0xab	/* write physical address register 1 */
#define W_PAR2		0xac	/* write physical address register 2 */
#define W_PAR3		0xad	/* write physical address register 3 */
#define W_PAR4		0xae	/* write physical address register 4 */
#define W_PAR5		0xaf	/* write physical address register 5 */

/* IDC 2 Command */
#define R_STS		0xc0	/* read status register */
#define R_CPR		0xc1	/* read current page register */
#define R_BPR		0xc2	/* read boundary page register */
#define R_TDR		0xc3	/* read time domain reflectometry reg */

/* STATUS Register */
#define EEDI		0x80	/* EEPM DO pin */
#define TXSUC		0x40	/* tx success */
#define T16		0x20	/* tx fail 16 times */
/* NOTE(review): TS1/TS0 alias the TXSUC/T16 bits above — two names for
   the same status bits, kept for compatibility with existing users. */
#define TS1		0x40	/* 0=Tx success, 1=T16 */
#define TS0		0x20	/* 0=Tx success, 1=T16 */
#define RXGOOD		0x10	/* rx a good packet */
#define RXCRC		0x08	/* rx a CRC error packet */
#define RXSHORT		0x04	/* rx a short packet */
#define COLS		0x02	/* coaxial collision status */
#define LNKS		0x01	/* UTP link status */

/* Command Register */
#define CLEAR		0x10	/* reset part of hardware */
#define NOPER		0x08	/* No Operation */
#define RNOP		0x08
#define RRA		0x06	/* After RR then auto-advance NPR & BPR(=NPR-1) */
#define RRN		0x04	/* Normal Remote Read mode */
#define RW1		0x02	/* Remote Write tx buffer 1 ( page 6 - 11 ) */
#define RW0		0x00	/* Remote Write tx buffer 0 ( page 0 - 5 ) */
#define TXEN		0x01	/* 0->1 tx enable */

/* System Configuration Register */
#define TESTON		0x80	/* test host data transfer reliability */
#define SLEEP		0x40	/* sleep mode */
/* Two historical encodings of the fast/byte-mode bits; the #else branch
   is the one actually compiled in. */
#if 0
#define FASTMODE	0x04	/* fast mode for intel 82360SL fast mode */
#define BYTEMODE	0x02	/* byte mode */
#else
#define FASTMODE	0x20	/* fast mode for intel 82360SL fast mode */
#define BYTEMODE	0x10	/* byte mode */
#endif
#define NIBBLEMODE	0x00	/* nibble mode */
#define IRQINV		0x08	/* turn off IRQ line inverter */
#define IRQNML		0x00	/* turn on IRQ line inverter */
#define INTON		0x04
#define AUTOFFSET	0x02	/* auto shift address to TPR+12 */
#define AUTOTX		0x01	/* auto tx when leave RW mode */

/* Transceiver Configuration Register */
#define JABBER		0x80	/* generate jabber condition */
#define TXSUCINT	0x40	/* enable tx success interrupt */
#define T16INT		0x20	/* enable T16 interrupt */
#define RXERRPKT	0x10	/* accept CRC error or short packet */
#define EXTERNALB2	0x0C	/* external loopback 2 */
#define EXTERNALB1	0x08	/* external loopback 1 */
#define INTERNALB	0x04	/* internal loopback */
#define NMLOPERATE	0x00	/* normal operation */
#define RXPBM		0x03	/* rx physical, broadcast, multicast */
#define RXPB		0x02	/* rx physical, broadcast */
#define RXALL		0x01	/* rx all packet */
#define RXOFF		0x00	/* rx disable */
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c new file mode 100644 index 000000000000..ed73e4a93508 --- /dev/null +++ b/drivers/net/ethernet/dlink/dl2k.c | |||
@@ -0,0 +1,1824 @@ | |||
1 | /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ | ||
2 | /* | ||
3 | Copyright (c) 2001, 2002 by D-Link Corporation | ||
4 | Written by Edward Peng.<edward_peng@dlink.com.tw> | ||
5 | Created 03-May-2001, base on Linux' sundance.c. | ||
6 | |||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of the GNU General Public License as published by | ||
9 | the Free Software Foundation; either version 2 of the License, or | ||
10 | (at your option) any later version. | ||
11 | */ | ||
12 | |||
#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
      KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
/* Per-card tuning, indexed by probe order (module parameters below). */
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;			/* -1 = enabled unless set to 0 */
static int rx_flow=-1;			/* -1 = enabled unless set to 0 */
static int copy_thresh;
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)
/* Requires a local `ioaddr` in scope at the call site. */
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)

static const int max_intrloop = 50;		/* ISR loop bound (see rio_interrupt) */
static const int multicast_filter_limit = 0x40;
57 | |||
58 | static int rio_open (struct net_device *dev); | ||
59 | static void rio_timer (unsigned long data); | ||
60 | static void rio_tx_timeout (struct net_device *dev); | ||
61 | static void alloc_list (struct net_device *dev); | ||
62 | static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); | ||
63 | static irqreturn_t rio_interrupt (int irq, void *dev_instance); | ||
64 | static void rio_free_tx (struct net_device *dev, int irq); | ||
65 | static void tx_error (struct net_device *dev, int tx_status); | ||
66 | static int receive_packet (struct net_device *dev); | ||
67 | static void rio_error (struct net_device *dev, int int_status); | ||
68 | static int change_mtu (struct net_device *dev, int new_mtu); | ||
69 | static void set_multicast (struct net_device *dev); | ||
70 | static struct net_device_stats *get_stats (struct net_device *dev); | ||
71 | static int clear_stats (struct net_device *dev); | ||
72 | static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); | ||
73 | static int rio_close (struct net_device *dev); | ||
74 | static int find_miiphy (struct net_device *dev); | ||
75 | static int parse_eeprom (struct net_device *dev); | ||
76 | static int read_eeprom (long ioaddr, int eep_addr); | ||
77 | static int mii_wait_link (struct net_device *dev, int wait); | ||
78 | static int mii_set_media (struct net_device *dev); | ||
79 | static int mii_get_media (struct net_device *dev); | ||
80 | static int mii_set_media_pcs (struct net_device *dev); | ||
81 | static int mii_get_media_pcs (struct net_device *dev); | ||
82 | static int mii_read (struct net_device *dev, int phy_addr, int reg_num); | ||
83 | static int mii_write (struct net_device *dev, int phy_addr, int reg_num, | ||
84 | u16 data); | ||
85 | |||
86 | static const struct ethtool_ops ethtool_ops; | ||
87 | |||
/* net_device callbacks wired up in rio_probe1(). */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit	= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_set_multicast_list = set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
	.ndo_change_mtu		= change_mtu,
};
100 | |||
101 | static int __devinit | ||
102 | rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) | ||
103 | { | ||
104 | struct net_device *dev; | ||
105 | struct netdev_private *np; | ||
106 | static int card_idx; | ||
107 | int chip_idx = ent->driver_data; | ||
108 | int err, irq; | ||
109 | long ioaddr; | ||
110 | static int version_printed; | ||
111 | void *ring_space; | ||
112 | dma_addr_t ring_dma; | ||
113 | |||
114 | if (!version_printed++) | ||
115 | printk ("%s", version); | ||
116 | |||
117 | err = pci_enable_device (pdev); | ||
118 | if (err) | ||
119 | return err; | ||
120 | |||
121 | irq = pdev->irq; | ||
122 | err = pci_request_regions (pdev, "dl2k"); | ||
123 | if (err) | ||
124 | goto err_out_disable; | ||
125 | |||
126 | pci_set_master (pdev); | ||
127 | dev = alloc_etherdev (sizeof (*np)); | ||
128 | if (!dev) { | ||
129 | err = -ENOMEM; | ||
130 | goto err_out_res; | ||
131 | } | ||
132 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
133 | |||
134 | #ifdef MEM_MAPPING | ||
135 | ioaddr = pci_resource_start (pdev, 1); | ||
136 | ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); | ||
137 | if (!ioaddr) { | ||
138 | err = -ENOMEM; | ||
139 | goto err_out_dev; | ||
140 | } | ||
141 | #else | ||
142 | ioaddr = pci_resource_start (pdev, 0); | ||
143 | #endif | ||
144 | dev->base_addr = ioaddr; | ||
145 | dev->irq = irq; | ||
146 | np = netdev_priv(dev); | ||
147 | np->chip_id = chip_idx; | ||
148 | np->pdev = pdev; | ||
149 | spin_lock_init (&np->tx_lock); | ||
150 | spin_lock_init (&np->rx_lock); | ||
151 | |||
152 | /* Parse manual configuration */ | ||
153 | np->an_enable = 1; | ||
154 | np->tx_coalesce = 1; | ||
155 | if (card_idx < MAX_UNITS) { | ||
156 | if (media[card_idx] != NULL) { | ||
157 | np->an_enable = 0; | ||
158 | if (strcmp (media[card_idx], "auto") == 0 || | ||
159 | strcmp (media[card_idx], "autosense") == 0 || | ||
160 | strcmp (media[card_idx], "0") == 0 ) { | ||
161 | np->an_enable = 2; | ||
162 | } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || | ||
163 | strcmp (media[card_idx], "4") == 0) { | ||
164 | np->speed = 100; | ||
165 | np->full_duplex = 1; | ||
166 | } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || | ||
167 | strcmp (media[card_idx], "3") == 0) { | ||
168 | np->speed = 100; | ||
169 | np->full_duplex = 0; | ||
170 | } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || | ||
171 | strcmp (media[card_idx], "2") == 0) { | ||
172 | np->speed = 10; | ||
173 | np->full_duplex = 1; | ||
174 | } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || | ||
175 | strcmp (media[card_idx], "1") == 0) { | ||
176 | np->speed = 10; | ||
177 | np->full_duplex = 0; | ||
178 | } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || | ||
179 | strcmp (media[card_idx], "6") == 0) { | ||
180 | np->speed=1000; | ||
181 | np->full_duplex=1; | ||
182 | } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || | ||
183 | strcmp (media[card_idx], "5") == 0) { | ||
184 | np->speed = 1000; | ||
185 | np->full_duplex = 0; | ||
186 | } else { | ||
187 | np->an_enable = 1; | ||
188 | } | ||
189 | } | ||
190 | if (jumbo[card_idx] != 0) { | ||
191 | np->jumbo = 1; | ||
192 | dev->mtu = MAX_JUMBO; | ||
193 | } else { | ||
194 | np->jumbo = 0; | ||
195 | if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) | ||
196 | dev->mtu = mtu[card_idx]; | ||
197 | } | ||
198 | np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? | ||
199 | vlan[card_idx] : 0; | ||
200 | if (rx_coalesce > 0 && rx_timeout > 0) { | ||
201 | np->rx_coalesce = rx_coalesce; | ||
202 | np->rx_timeout = rx_timeout; | ||
203 | np->coalesce = 1; | ||
204 | } | ||
205 | np->tx_flow = (tx_flow == 0) ? 0 : 1; | ||
206 | np->rx_flow = (rx_flow == 0) ? 0 : 1; | ||
207 | |||
208 | if (tx_coalesce < 1) | ||
209 | tx_coalesce = 1; | ||
210 | else if (tx_coalesce > TX_RING_SIZE-1) | ||
211 | tx_coalesce = TX_RING_SIZE - 1; | ||
212 | } | ||
213 | dev->netdev_ops = &netdev_ops; | ||
214 | dev->watchdog_timeo = TX_TIMEOUT; | ||
215 | SET_ETHTOOL_OPS(dev, ðtool_ops); | ||
216 | #if 0 | ||
217 | dev->features = NETIF_F_IP_CSUM; | ||
218 | #endif | ||
219 | pci_set_drvdata (pdev, dev); | ||
220 | |||
221 | ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); | ||
222 | if (!ring_space) | ||
223 | goto err_out_iounmap; | ||
224 | np->tx_ring = ring_space; | ||
225 | np->tx_ring_dma = ring_dma; | ||
226 | |||
227 | ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); | ||
228 | if (!ring_space) | ||
229 | goto err_out_unmap_tx; | ||
230 | np->rx_ring = ring_space; | ||
231 | np->rx_ring_dma = ring_dma; | ||
232 | |||
233 | /* Parse eeprom data */ | ||
234 | parse_eeprom (dev); | ||
235 | |||
236 | /* Find PHY address */ | ||
237 | err = find_miiphy (dev); | ||
238 | if (err) | ||
239 | goto err_out_unmap_rx; | ||
240 | |||
241 | /* Fiber device? */ | ||
242 | np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; | ||
243 | np->link_status = 0; | ||
244 | /* Set media and reset PHY */ | ||
245 | if (np->phy_media) { | ||
246 | /* default Auto-Negotiation for fiber deivices */ | ||
247 | if (np->an_enable == 2) { | ||
248 | np->an_enable = 1; | ||
249 | } | ||
250 | mii_set_media_pcs (dev); | ||
251 | } else { | ||
252 | /* Auto-Negotiation is mandatory for 1000BASE-T, | ||
253 | IEEE 802.3ab Annex 28D page 14 */ | ||
254 | if (np->speed == 1000) | ||
255 | np->an_enable = 1; | ||
256 | mii_set_media (dev); | ||
257 | } | ||
258 | |||
259 | err = register_netdev (dev); | ||
260 | if (err) | ||
261 | goto err_out_unmap_rx; | ||
262 | |||
263 | card_idx++; | ||
264 | |||
265 | printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", | ||
266 | dev->name, np->name, dev->dev_addr, irq); | ||
267 | if (tx_coalesce > 1) | ||
268 | printk(KERN_INFO "tx_coalesce:\t%d packets\n", | ||
269 | tx_coalesce); | ||
270 | if (np->coalesce) | ||
271 | printk(KERN_INFO | ||
272 | "rx_coalesce:\t%d packets\n" | ||
273 | "rx_timeout: \t%d ns\n", | ||
274 | np->rx_coalesce, np->rx_timeout*640); | ||
275 | if (np->vlan) | ||
276 | printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); | ||
277 | return 0; | ||
278 | |||
279 | err_out_unmap_rx: | ||
280 | pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); | ||
281 | err_out_unmap_tx: | ||
282 | pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); | ||
283 | err_out_iounmap: | ||
284 | #ifdef MEM_MAPPING | ||
285 | iounmap ((void *) ioaddr); | ||
286 | |||
287 | err_out_dev: | ||
288 | #endif | ||
289 | free_netdev (dev); | ||
290 | |||
291 | err_out_res: | ||
292 | pci_release_regions (pdev); | ||
293 | |||
294 | err_out_disable: | ||
295 | pci_disable_device (pdev); | ||
296 | return err; | ||
297 | } | ||
298 | |||
299 | static int | ||
300 | find_miiphy (struct net_device *dev) | ||
301 | { | ||
302 | int i, phy_found = 0; | ||
303 | struct netdev_private *np; | ||
304 | long ioaddr; | ||
305 | np = netdev_priv(dev); | ||
306 | ioaddr = dev->base_addr; | ||
307 | np->phy_addr = 1; | ||
308 | |||
309 | for (i = 31; i >= 0; i--) { | ||
310 | int mii_status = mii_read (dev, i, 1); | ||
311 | if (mii_status != 0xffff && mii_status != 0x0000) { | ||
312 | np->phy_addr = i; | ||
313 | phy_found++; | ||
314 | } | ||
315 | } | ||
316 | if (!phy_found) { | ||
317 | printk (KERN_ERR "%s: No MII PHY found!\n", dev->name); | ||
318 | return -ENODEV; | ||
319 | } | ||
320 | return 0; | ||
321 | } | ||
322 | |||
/*
 * Read the 256-byte EEPROM, verify its CRC (D-Link boards only), copy
 * the MAC address into dev->dev_addr, and walk the Software Information
 * Block starting at offset 0x30 (a chain of {cell-id, next-offset}
 * cells) to pick up duplex/wake polarity and the adapter name string.
 *
 * Returns 0 on success, -1 on CRC or cell-chain corruption.
 */
static int
parse_eeprom (struct net_device *dev)
{
	int i, j;
	long ioaddr = dev->base_addr;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;
	struct netdev_private *np = netdev_priv(dev);

	int cid, next;

#ifdef	MEM_MAPPING
	/* EEPROM access always goes through the I/O BAR */
	ioaddr = pci_resource_start (np->pdev, 0);
#endif
	/* Read eeprom: 128 little-endian words into sromdata */
	for (i = 0; i < 128; i++) {
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
	}
#ifdef	MEM_MAPPING
	ioaddr = dev->base_addr;
#endif
	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC over everything but the trailing 4-byte CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block: each cell is id, next-offset,
	   payload; chain ends at the "End of cell" id. */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		/* All-zero or all-ones pairs indicate a corrupt chain */
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			writeb (readb (ioaddr + PhyCtrl) | psib[i],
				ioaddr + PhyCtrl);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description (bounded to 255 bytes) */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}
405 | |||
/*
 * ndo_open: bring the interface up.  Requests the (shared) IRQ, resets
 * the ASIC, programs frame size, station address, DMA thresholds,
 * multicast and VLAN state, starts the 1-second housekeeping timer,
 * enables Tx/Rx in MACCtrl, and unmasks the default interrupts.
 * Order matters: the ring (alloc_list) must exist before Tx/Rx enable.
 */
static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	u16 macctrl;

	i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
		ioaddr + ASICCtrl + 2);
	mdelay(10);

	/* DebugCtrl bit 4, 5, 9 must set */
	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

	/* Jumbo frame: MAX_JUMBO payload + 14-byte Ethernet header */
	if (np->jumbo != 0)
		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

	/* Allocate and initialize the Tx/Rx descriptor rings */
	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

	set_multicast (dev);
	if (np->coalesce) {
		/* frame count in low bits, timeout (640ns units) in 31:16 */
		writel (np->rx_coalesce | np->rx_timeout << 16,
			ioaddr + RxDMAIntCtrl);
	}
	/* Set RIO to poll every N*320nsec. */
	writeb (0x20, ioaddr + RxDMAPollPeriod);
	writeb (0xff, ioaddr + TxDMAPollPeriod);
	writeb (0x30, ioaddr + RxDMABurstThresh);
	writeb (0x30, ioaddr + RxDMAUrgentThresh);
	writel (0x0007ffff, ioaddr + RmonStatMask);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
			ioaddr + RxDMAIntCtrl);
		/* VLANId */
		writew (np->vlan, ioaddr + VLANId);
		/* Length/Type should be 0x8100 */
		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC' VID, CFI fields. */
		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
			ioaddr + MACCtrl);
	}

	/* Housekeeping timer: rio_timer() recovers an exhausted Rx ring */
	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
			ioaddr + MACCtrl);

	/* NOTE(review): this 16-bit write replaces (rather than ORs into)
	   the low half of MACCtrl just written above — presumably the
	   intended final duplex/flow-control configuration; confirm
	   against the DL2000 register spec. */
	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	writew(macctrl,	ioaddr + MACCtrl);

	netif_start_queue (dev);

	/* Enable default interrupts */
	EnableInt ();
	return 0;
}
488 | |||
/*
 * 1-second housekeeping timer.  If the Rx ring has been completely
 * consumed (cur_rx has run a full ring ahead of old_rx), retry the
 * skb allocations that failed in receive_packet() and hand the
 * descriptors back to the hardware.  Runs under np->rx_lock to
 * serialize against the receive path.
 */
static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					/* Still failing: leave this slot for
					   the next timer tick */
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			/* Buffer length lives in bits 63:48 of fraginfo */
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
532 | |||
533 | static void | ||
534 | rio_tx_timeout (struct net_device *dev) | ||
535 | { | ||
536 | long ioaddr = dev->base_addr; | ||
537 | |||
538 | printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", | ||
539 | dev->name, readl (ioaddr + TxStatus)); | ||
540 | rio_free_tx(dev, 0); | ||
541 | dev->if_port = 0; | ||
542 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
543 | } | ||
544 | |||
/* allocate and initialize Tx and Rx descriptors */
/*
 * Reset the ring indices, link the Tx and Rx descriptors into circular
 * lists via their DMA next_desc pointers, populate each Rx slot with a
 * freshly mapped skb, and point the hardware at the Rx list.  The Tx
 * list pointer is programmed lazily in start_xmit().
 */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	/* Standard MTU uses the fixed PACKET_SIZE buffer; jumbo gets
	   mtu + 32 bytes of slack */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* Mark done so rio_free_tx() treats the slot as free */
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			/* rio_timer() retries failed slots later */
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		/* Buffer length occupies bits 63:48 of fraginfo */
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);
}
600 | |||
601 | static netdev_tx_t | ||
602 | start_xmit (struct sk_buff *skb, struct net_device *dev) | ||
603 | { | ||
604 | struct netdev_private *np = netdev_priv(dev); | ||
605 | struct netdev_desc *txdesc; | ||
606 | unsigned entry; | ||
607 | u32 ioaddr; | ||
608 | u64 tfc_vlan_tag = 0; | ||
609 | |||
610 | if (np->link_status == 0) { /* Link Down */ | ||
611 | dev_kfree_skb(skb); | ||
612 | return NETDEV_TX_OK; | ||
613 | } | ||
614 | ioaddr = dev->base_addr; | ||
615 | entry = np->cur_tx % TX_RING_SIZE; | ||
616 | np->tx_skbuff[entry] = skb; | ||
617 | txdesc = &np->tx_ring[entry]; | ||
618 | |||
619 | #if 0 | ||
620 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
621 | txdesc->status |= | ||
622 | cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | | ||
623 | IPChecksumEnable); | ||
624 | } | ||
625 | #endif | ||
626 | if (np->vlan) { | ||
627 | tfc_vlan_tag = VLANTagInsert | | ||
628 | ((u64)np->vlan << 32) | | ||
629 | ((u64)skb->priority << 45); | ||
630 | } | ||
631 | txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, | ||
632 | skb->len, | ||
633 | PCI_DMA_TODEVICE)); | ||
634 | txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); | ||
635 | |||
636 | /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode | ||
637 | * Work around: Always use 1 descriptor in 10Mbps mode */ | ||
638 | if (entry % np->tx_coalesce == 0 || np->speed == 10) | ||
639 | txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | | ||
640 | WordAlignDisable | | ||
641 | TxDMAIndicate | | ||
642 | (1 << FragCountShift)); | ||
643 | else | ||
644 | txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | | ||
645 | WordAlignDisable | | ||
646 | (1 << FragCountShift)); | ||
647 | |||
648 | /* TxDMAPollNow */ | ||
649 | writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); | ||
650 | /* Schedule ISR */ | ||
651 | writel(10000, ioaddr + CountDown); | ||
652 | np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; | ||
653 | if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE | ||
654 | < TX_QUEUE_LEN - 1 && np->speed != 10) { | ||
655 | /* do nothing */ | ||
656 | } else if (!netif_queue_stopped(dev)) { | ||
657 | netif_stop_queue (dev); | ||
658 | } | ||
659 | |||
660 | /* The first TFDListPtr */ | ||
661 | if (readl (dev->base_addr + TFDListPtr0) == 0) { | ||
662 | writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), | ||
663 | dev->base_addr + TFDListPtr0); | ||
664 | writel (0, dev->base_addr + TFDListPtr1); | ||
665 | } | ||
666 | |||
667 | return NETDEV_TX_OK; | ||
668 | } | ||
669 | |||
/*
 * Interrupt handler.  Reads and acknowledges IntStatus in a loop
 * (bounded by max_intrloop to avoid livelock on a screaming device),
 * dispatching to the Rx path, Tx-completion path, and error handler.
 * The write-back of IntStatus acknowledges the bits just read.
 */
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = netdev_priv(dev);
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);	/* ack */
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	/* Re-arm CountDown so pending Tx completions raise IntRequested */
	if (np->cur_tx != np->old_tx)
		writel (100, ioaddr + CountDown);
	return IRQ_RETVAL(handled);
}
711 | |||
712 | static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) | ||
713 | { | ||
714 | return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); | ||
715 | } | ||
716 | |||
/*
 * Reclaim completed transmit descriptors: for every TFD the hardware has
 * marked TFDDone, unmap the buffer and free its skb, then advance old_tx.
 * Finally wake the queue if enough ring space has opened up.
 *
 * @irq: nonzero when called from interrupt context (uses plain spin_lock
 *       and dev_kfree_skb_irq); zero for process context (irqsave variant
 *       and dev_kfree_skb).
 */
static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		/* Stop at the first descriptor the NIC hasn't finished. */
		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	/* At 10 Mbps the queue is always woken (mirrors the stop condition
	   in the xmit path). */
	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}
764 | |||
765 | static void | ||
766 | tx_error (struct net_device *dev, int tx_status) | ||
767 | { | ||
768 | struct netdev_private *np; | ||
769 | long ioaddr = dev->base_addr; | ||
770 | int frame_id; | ||
771 | int i; | ||
772 | |||
773 | np = netdev_priv(dev); | ||
774 | |||
775 | frame_id = (tx_status & 0xffff0000); | ||
776 | printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", | ||
777 | dev->name, tx_status, frame_id); | ||
778 | np->stats.tx_errors++; | ||
779 | /* Ttransmit Underrun */ | ||
780 | if (tx_status & 0x10) { | ||
781 | np->stats.tx_fifo_errors++; | ||
782 | writew (readw (ioaddr + TxStartThresh) + 0x10, | ||
783 | ioaddr + TxStartThresh); | ||
784 | /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ | ||
785 | writew (TxReset | DMAReset | FIFOReset | NetworkReset, | ||
786 | ioaddr + ASICCtrl + 2); | ||
787 | /* Wait for ResetBusy bit clear */ | ||
788 | for (i = 50; i > 0; i--) { | ||
789 | if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) | ||
790 | break; | ||
791 | mdelay (1); | ||
792 | } | ||
793 | rio_free_tx (dev, 1); | ||
794 | /* Reset TFDListPtr */ | ||
795 | writel (np->tx_ring_dma + | ||
796 | np->old_tx * sizeof (struct netdev_desc), | ||
797 | dev->base_addr + TFDListPtr0); | ||
798 | writel (0, dev->base_addr + TFDListPtr1); | ||
799 | |||
800 | /* Let TxStartThresh stay default value */ | ||
801 | } | ||
802 | /* Late Collision */ | ||
803 | if (tx_status & 0x04) { | ||
804 | np->stats.tx_fifo_errors++; | ||
805 | /* TxReset and clear FIFO */ | ||
806 | writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); | ||
807 | /* Wait reset done */ | ||
808 | for (i = 50; i > 0; i--) { | ||
809 | if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) | ||
810 | break; | ||
811 | mdelay (1); | ||
812 | } | ||
813 | /* Let TxStartThresh stay default value */ | ||
814 | } | ||
815 | /* Maximum Collisions */ | ||
816 | #ifdef ETHER_STATS | ||
817 | if (tx_status & 0x08) | ||
818 | np->stats.collisions16++; | ||
819 | #else | ||
820 | if (tx_status & 0x08) | ||
821 | np->stats.collisions++; | ||
822 | #endif | ||
823 | /* Restart the Tx */ | ||
824 | writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); | ||
825 | } | ||
826 | |||
827 | static int | ||
828 | receive_packet (struct net_device *dev) | ||
829 | { | ||
830 | struct netdev_private *np = netdev_priv(dev); | ||
831 | int entry = np->cur_rx % RX_RING_SIZE; | ||
832 | int cnt = 30; | ||
833 | |||
834 | /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ | ||
835 | while (1) { | ||
836 | struct netdev_desc *desc = &np->rx_ring[entry]; | ||
837 | int pkt_len; | ||
838 | u64 frame_status; | ||
839 | |||
840 | if (!(desc->status & cpu_to_le64(RFDDone)) || | ||
841 | !(desc->status & cpu_to_le64(FrameStart)) || | ||
842 | !(desc->status & cpu_to_le64(FrameEnd))) | ||
843 | break; | ||
844 | |||
845 | /* Chip omits the CRC. */ | ||
846 | frame_status = le64_to_cpu(desc->status); | ||
847 | pkt_len = frame_status & 0xffff; | ||
848 | if (--cnt < 0) | ||
849 | break; | ||
850 | /* Update rx error statistics, drop packet. */ | ||
851 | if (frame_status & RFS_Errors) { | ||
852 | np->stats.rx_errors++; | ||
853 | if (frame_status & (RxRuntFrame | RxLengthError)) | ||
854 | np->stats.rx_length_errors++; | ||
855 | if (frame_status & RxFCSError) | ||
856 | np->stats.rx_crc_errors++; | ||
857 | if (frame_status & RxAlignmentError && np->speed != 1000) | ||
858 | np->stats.rx_frame_errors++; | ||
859 | if (frame_status & RxFIFOOverrun) | ||
860 | np->stats.rx_fifo_errors++; | ||
861 | } else { | ||
862 | struct sk_buff *skb; | ||
863 | |||
864 | /* Small skbuffs for short packets */ | ||
865 | if (pkt_len > copy_thresh) { | ||
866 | pci_unmap_single (np->pdev, | ||
867 | desc_to_dma(desc), | ||
868 | np->rx_buf_sz, | ||
869 | PCI_DMA_FROMDEVICE); | ||
870 | skb_put (skb = np->rx_skbuff[entry], pkt_len); | ||
871 | np->rx_skbuff[entry] = NULL; | ||
872 | } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { | ||
873 | pci_dma_sync_single_for_cpu(np->pdev, | ||
874 | desc_to_dma(desc), | ||
875 | np->rx_buf_sz, | ||
876 | PCI_DMA_FROMDEVICE); | ||
877 | skb_copy_to_linear_data (skb, | ||
878 | np->rx_skbuff[entry]->data, | ||
879 | pkt_len); | ||
880 | skb_put (skb, pkt_len); | ||
881 | pci_dma_sync_single_for_device(np->pdev, | ||
882 | desc_to_dma(desc), | ||
883 | np->rx_buf_sz, | ||
884 | PCI_DMA_FROMDEVICE); | ||
885 | } | ||
886 | skb->protocol = eth_type_trans (skb, dev); | ||
887 | #if 0 | ||
888 | /* Checksum done by hw, but csum value unavailable. */ | ||
889 | if (np->pdev->pci_rev_id >= 0x0c && | ||
890 | !(frame_status & (TCPError | UDPError | IPError))) { | ||
891 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
892 | } | ||
893 | #endif | ||
894 | netif_rx (skb); | ||
895 | } | ||
896 | entry = (entry + 1) % RX_RING_SIZE; | ||
897 | } | ||
898 | spin_lock(&np->rx_lock); | ||
899 | np->cur_rx = entry; | ||
900 | /* Re-allocate skbuffs to fill the descriptor ring */ | ||
901 | entry = np->old_rx; | ||
902 | while (entry != np->cur_rx) { | ||
903 | struct sk_buff *skb; | ||
904 | /* Dropped packets don't need to re-allocate */ | ||
905 | if (np->rx_skbuff[entry] == NULL) { | ||
906 | skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); | ||
907 | if (skb == NULL) { | ||
908 | np->rx_ring[entry].fraginfo = 0; | ||
909 | printk (KERN_INFO | ||
910 | "%s: receive_packet: " | ||
911 | "Unable to re-allocate Rx skbuff.#%d\n", | ||
912 | dev->name, entry); | ||
913 | break; | ||
914 | } | ||
915 | np->rx_skbuff[entry] = skb; | ||
916 | np->rx_ring[entry].fraginfo = | ||
917 | cpu_to_le64 (pci_map_single | ||
918 | (np->pdev, skb->data, np->rx_buf_sz, | ||
919 | PCI_DMA_FROMDEVICE)); | ||
920 | } | ||
921 | np->rx_ring[entry].fraginfo |= | ||
922 | cpu_to_le64((u64)np->rx_buf_sz << 48); | ||
923 | np->rx_ring[entry].status = 0; | ||
924 | entry = (entry + 1) % RX_RING_SIZE; | ||
925 | } | ||
926 | np->old_rx = entry; | ||
927 | spin_unlock(&np->rx_lock); | ||
928 | return 0; | ||
929 | } | ||
930 | |||
/*
 * Handle uncommon interrupt sources: link state changes, statistics
 * overflow, and catastrophic host/bus errors.
 */
static void
rio_error (struct net_device *dev, int int_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		/* Poll the PHY briefly; 0 means link is (still) up. */
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			/* Re-read the negotiated speed/duplex/flow-control. */
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			/* Tx interrupt coalescing only pays off at gigabit. */
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			/* Rebuild MACCtrl from the freshly negotiated state. */
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			writew(macctrl, ioaddr + MACCtrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error, a catastronphic error related to the bus interface
	   occurs, set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
		mdelay (500);
	}
}
981 | |||
982 | static struct net_device_stats * | ||
983 | get_stats (struct net_device *dev) | ||
984 | { | ||
985 | long ioaddr = dev->base_addr; | ||
986 | struct netdev_private *np = netdev_priv(dev); | ||
987 | #ifdef MEM_MAPPING | ||
988 | int i; | ||
989 | #endif | ||
990 | unsigned int stat_reg; | ||
991 | |||
992 | /* All statistics registers need to be acknowledged, | ||
993 | else statistic overflow could cause problems */ | ||
994 | |||
995 | np->stats.rx_packets += readl (ioaddr + FramesRcvOk); | ||
996 | np->stats.tx_packets += readl (ioaddr + FramesXmtOk); | ||
997 | np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); | ||
998 | np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); | ||
999 | |||
1000 | np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); | ||
1001 | np->stats.collisions += readl (ioaddr + SingleColFrames) | ||
1002 | + readl (ioaddr + MultiColFrames); | ||
1003 | |||
1004 | /* detailed tx errors */ | ||
1005 | stat_reg = readw (ioaddr + FramesAbortXSColls); | ||
1006 | np->stats.tx_aborted_errors += stat_reg; | ||
1007 | np->stats.tx_errors += stat_reg; | ||
1008 | |||
1009 | stat_reg = readw (ioaddr + CarrierSenseErrors); | ||
1010 | np->stats.tx_carrier_errors += stat_reg; | ||
1011 | np->stats.tx_errors += stat_reg; | ||
1012 | |||
1013 | /* Clear all other statistic register. */ | ||
1014 | readl (ioaddr + McstOctetXmtOk); | ||
1015 | readw (ioaddr + BcstFramesXmtdOk); | ||
1016 | readl (ioaddr + McstFramesXmtdOk); | ||
1017 | readw (ioaddr + BcstFramesRcvdOk); | ||
1018 | readw (ioaddr + MacControlFramesRcvd); | ||
1019 | readw (ioaddr + FrameTooLongErrors); | ||
1020 | readw (ioaddr + InRangeLengthErrors); | ||
1021 | readw (ioaddr + FramesCheckSeqErrors); | ||
1022 | readw (ioaddr + FramesLostRxErrors); | ||
1023 | readl (ioaddr + McstOctetXmtOk); | ||
1024 | readl (ioaddr + BcstOctetXmtOk); | ||
1025 | readl (ioaddr + McstFramesXmtdOk); | ||
1026 | readl (ioaddr + FramesWDeferredXmt); | ||
1027 | readl (ioaddr + LateCollisions); | ||
1028 | readw (ioaddr + BcstFramesXmtdOk); | ||
1029 | readw (ioaddr + MacControlFramesXmtd); | ||
1030 | readw (ioaddr + FramesWEXDeferal); | ||
1031 | |||
1032 | #ifdef MEM_MAPPING | ||
1033 | for (i = 0x100; i <= 0x150; i += 4) | ||
1034 | readl (ioaddr + i); | ||
1035 | #endif | ||
1036 | readw (ioaddr + TxJumboFrames); | ||
1037 | readw (ioaddr + RxJumboFrames); | ||
1038 | readw (ioaddr + TCPCheckSumErrors); | ||
1039 | readw (ioaddr + UDPCheckSumErrors); | ||
1040 | readw (ioaddr + IPCheckSumErrors); | ||
1041 | return &np->stats; | ||
1042 | } | ||
1043 | |||
1044 | static int | ||
1045 | clear_stats (struct net_device *dev) | ||
1046 | { | ||
1047 | long ioaddr = dev->base_addr; | ||
1048 | #ifdef MEM_MAPPING | ||
1049 | int i; | ||
1050 | #endif | ||
1051 | |||
1052 | /* All statistics registers need to be acknowledged, | ||
1053 | else statistic overflow could cause problems */ | ||
1054 | readl (ioaddr + FramesRcvOk); | ||
1055 | readl (ioaddr + FramesXmtOk); | ||
1056 | readl (ioaddr + OctetRcvOk); | ||
1057 | readl (ioaddr + OctetXmtOk); | ||
1058 | |||
1059 | readl (ioaddr + McstFramesRcvdOk); | ||
1060 | readl (ioaddr + SingleColFrames); | ||
1061 | readl (ioaddr + MultiColFrames); | ||
1062 | readl (ioaddr + LateCollisions); | ||
1063 | /* detailed rx errors */ | ||
1064 | readw (ioaddr + FrameTooLongErrors); | ||
1065 | readw (ioaddr + InRangeLengthErrors); | ||
1066 | readw (ioaddr + FramesCheckSeqErrors); | ||
1067 | readw (ioaddr + FramesLostRxErrors); | ||
1068 | |||
1069 | /* detailed tx errors */ | ||
1070 | readw (ioaddr + FramesAbortXSColls); | ||
1071 | readw (ioaddr + CarrierSenseErrors); | ||
1072 | |||
1073 | /* Clear all other statistic register. */ | ||
1074 | readl (ioaddr + McstOctetXmtOk); | ||
1075 | readw (ioaddr + BcstFramesXmtdOk); | ||
1076 | readl (ioaddr + McstFramesXmtdOk); | ||
1077 | readw (ioaddr + BcstFramesRcvdOk); | ||
1078 | readw (ioaddr + MacControlFramesRcvd); | ||
1079 | readl (ioaddr + McstOctetXmtOk); | ||
1080 | readl (ioaddr + BcstOctetXmtOk); | ||
1081 | readl (ioaddr + McstFramesXmtdOk); | ||
1082 | readl (ioaddr + FramesWDeferredXmt); | ||
1083 | readw (ioaddr + BcstFramesXmtdOk); | ||
1084 | readw (ioaddr + MacControlFramesXmtd); | ||
1085 | readw (ioaddr + FramesWEXDeferal); | ||
1086 | #ifdef MEM_MAPPING | ||
1087 | for (i = 0x100; i <= 0x150; i += 4) | ||
1088 | readl (ioaddr + i); | ||
1089 | #endif | ||
1090 | readw (ioaddr + TxJumboFrames); | ||
1091 | readw (ioaddr + RxJumboFrames); | ||
1092 | readw (ioaddr + TCPCheckSumErrors); | ||
1093 | readw (ioaddr + UDPCheckSumErrors); | ||
1094 | readw (ioaddr + IPCheckSumErrors); | ||
1095 | return 0; | ||
1096 | } | ||
1097 | |||
1098 | |||
1099 | static int | ||
1100 | change_mtu (struct net_device *dev, int new_mtu) | ||
1101 | { | ||
1102 | struct netdev_private *np = netdev_priv(dev); | ||
1103 | int max = (np->jumbo) ? MAX_JUMBO : 1536; | ||
1104 | |||
1105 | if ((new_mtu < 68) || (new_mtu > max)) { | ||
1106 | return -EINVAL; | ||
1107 | } | ||
1108 | |||
1109 | dev->mtu = new_mtu; | ||
1110 | |||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
/*
 * Program the receive filter: promiscuous, all-multicast, hash-filtered
 * multicast, or unicast+broadcast only, plus the 64-bit multicast hash
 * table and optional VLAN matching.
 */
static void
set_multicast (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 hash_table[2];
	u16 rx_mode = 0;
	struct netdev_private *np = netdev_priv(dev);

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		netdev_for_each_mc_addr(ha, dev) {
			int bit, index = 0;
			int crc = ether_crc_le(ETH_ALEN, ha->addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			/* i.e. CRC bits 31..26, bit-reversed into a 6-bit
			   table index. */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	/* Load the hash table before enabling the matching mode. */
	writel (hash_table[0], ioaddr + HashTable0);
	writel (hash_table[1], ioaddr + HashTable1);
	writew (rx_mode, ioaddr + ReceiveMode);
}
1160 | |||
1161 | static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1162 | { | ||
1163 | struct netdev_private *np = netdev_priv(dev); | ||
1164 | strcpy(info->driver, "dl2k"); | ||
1165 | strcpy(info->version, DRV_VERSION); | ||
1166 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
1167 | } | ||
1168 | |||
1169 | static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1170 | { | ||
1171 | struct netdev_private *np = netdev_priv(dev); | ||
1172 | if (np->phy_media) { | ||
1173 | /* fiber device */ | ||
1174 | cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; | ||
1175 | cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; | ||
1176 | cmd->port = PORT_FIBRE; | ||
1177 | cmd->transceiver = XCVR_INTERNAL; | ||
1178 | } else { | ||
1179 | /* copper device */ | ||
1180 | cmd->supported = SUPPORTED_10baseT_Half | | ||
1181 | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | ||
1182 | | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | | ||
1183 | SUPPORTED_Autoneg | SUPPORTED_MII; | ||
1184 | cmd->advertising = ADVERTISED_10baseT_Half | | ||
1185 | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | | ||
1186 | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| | ||
1187 | ADVERTISED_Autoneg | ADVERTISED_MII; | ||
1188 | cmd->port = PORT_MII; | ||
1189 | cmd->transceiver = XCVR_INTERNAL; | ||
1190 | } | ||
1191 | if ( np->link_status ) { | ||
1192 | ethtool_cmd_speed_set(cmd, np->speed); | ||
1193 | cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; | ||
1194 | } else { | ||
1195 | ethtool_cmd_speed_set(cmd, -1); | ||
1196 | cmd->duplex = -1; | ||
1197 | } | ||
1198 | if ( np->an_enable) | ||
1199 | cmd->autoneg = AUTONEG_ENABLE; | ||
1200 | else | ||
1201 | cmd->autoneg = AUTONEG_DISABLE; | ||
1202 | |||
1203 | cmd->phy_address = np->phy_addr; | ||
1204 | return 0; | ||
1205 | } | ||
1206 | |||
1207 | static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1208 | { | ||
1209 | struct netdev_private *np = netdev_priv(dev); | ||
1210 | netif_carrier_off(dev); | ||
1211 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
1212 | if (np->an_enable) | ||
1213 | return 0; | ||
1214 | else { | ||
1215 | np->an_enable = 1; | ||
1216 | mii_set_media(dev); | ||
1217 | return 0; | ||
1218 | } | ||
1219 | } else { | ||
1220 | np->an_enable = 0; | ||
1221 | if (np->speed == 1000) { | ||
1222 | ethtool_cmd_speed_set(cmd, SPEED_100); | ||
1223 | cmd->duplex = DUPLEX_FULL; | ||
1224 | printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); | ||
1225 | } | ||
1226 | switch (ethtool_cmd_speed(cmd)) { | ||
1227 | case SPEED_10: | ||
1228 | np->speed = 10; | ||
1229 | np->full_duplex = (cmd->duplex == DUPLEX_FULL); | ||
1230 | break; | ||
1231 | case SPEED_100: | ||
1232 | np->speed = 100; | ||
1233 | np->full_duplex = (cmd->duplex == DUPLEX_FULL); | ||
1234 | break; | ||
1235 | case SPEED_1000: /* not supported */ | ||
1236 | default: | ||
1237 | return -EINVAL; | ||
1238 | } | ||
1239 | mii_set_media(dev); | ||
1240 | } | ||
1241 | return 0; | ||
1242 | } | ||
1243 | |||
1244 | static u32 rio_get_link(struct net_device *dev) | ||
1245 | { | ||
1246 | struct netdev_private *np = netdev_priv(dev); | ||
1247 | return np->link_status; | ||
1248 | } | ||
1249 | |||
/* ethtool entry points exposed by this driver. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_settings = rio_get_settings,
	.set_settings = rio_set_settings,
	.get_link = rio_get_link,
};
1256 | |||
/*
 * Private ioctl handler.  SIOCDEVPRIVATE+1/+2 implement raw MII register
 * read/write via a struct mii_data overlaid on ifr_ifru; the remaining
 * subcommands are debugging aids (queue stop/wake, ring dumps).
 */
static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;

	struct netdev_desc *desc;
	int i;

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCDEVPRIVATE:
		break;

	case SIOCDEVPRIVATE + 1:
		/* Read a PHY register. */
		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCDEVPRIVATE + 2:
		/* Write a PHY register. */
		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
		break;
	case SIOCDEVPRIVATE + 3:
		break;
	case SIOCDEVPRIVATE + 4:
		break;
	case SIOCDEVPRIVATE + 5:
		/* Debug: stop the transmit queue. */
		netif_stop_queue (dev);
		break;
	case SIOCDEVPRIVATE + 6:
		/* Debug: wake the transmit queue. */
		netif_wake_queue (dev);
		break;
	case SIOCDEVPRIVATE + 7:
		/* Debug: dump ring indices to the log. */
		printk
		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
		     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
		     np->old_rx);
		break;
	case SIOCDEVPRIVATE + 8:
		/* Debug: dump the whole Tx descriptor ring. */
		printk("TX ring:\n");
		for (i = 0; i < TX_RING_SIZE; i++) {
			desc = &np->tx_ring[i];
			printk
			    ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
			     i,
			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
			     (u32)le64_to_cpu(desc->next_desc),
			     (u32)le64_to_cpu(desc->status),
			     (u32)(le64_to_cpu(desc->fraginfo) >> 32),
			     (u32)le64_to_cpu(desc->fraginfo));
			printk ("\n");
		}
		printk ("\n");
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1316 | |||
1317 | #define EEP_READ 0x0200 | ||
1318 | #define EEP_BUSY 0x8000 | ||
1319 | /* Read the EEPROM word */ | ||
1320 | /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ | ||
1321 | static int | ||
1322 | read_eeprom (long ioaddr, int eep_addr) | ||
1323 | { | ||
1324 | int i = 1000; | ||
1325 | outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); | ||
1326 | while (i-- > 0) { | ||
1327 | if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { | ||
1328 | return inw (ioaddr + EepromData); | ||
1329 | } | ||
1330 | } | ||
1331 | return 0; | ||
1332 | } | ||
1333 | |||
/* Bit assignments of the PhyCtrl register used to bit-bang the MII
   management (MDIO) interface. */
enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};
1338 | |||
/* Dummy read of PhyCtrl, used as a short settling delay between
   management-bus transitions.  Relies on a local `ioaddr`. */
#define mii_delay() readb(ioaddr)
/*
 * Bit-bang one MDIO output bit: drive the data line (nonzero -> 1) with
 * write-enable set, then pulse the management clock high.
 */
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	data = (data) ? MII_DATA1 : 0;
	data |= MII_WRITE;
	/* Preserve the upper PhyCtrl bits; MII_WRITE is OR'd a second time
	   here, which is redundant but harmless. */
	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
}
1352 | |||
1353 | static int | ||
1354 | mii_getbit (struct net_device *dev) | ||
1355 | { | ||
1356 | long ioaddr = dev->base_addr + PhyCtrl; | ||
1357 | u8 data; | ||
1358 | |||
1359 | data = (readb (ioaddr) & 0xf8) | MII_READ; | ||
1360 | writeb (data, ioaddr); | ||
1361 | mii_delay (); | ||
1362 | writeb (data | MII_CLK, ioaddr); | ||
1363 | mii_delay (); | ||
1364 | return ((readb (ioaddr) >> 1) & 1); | ||
1365 | } | ||
1366 | |||
1367 | static void | ||
1368 | mii_send_bits (struct net_device *dev, u32 data, int len) | ||
1369 | { | ||
1370 | int i; | ||
1371 | for (i = len - 1; i >= 0; i--) { | ||
1372 | mii_sendbit (dev, data & (1 << i)); | ||
1373 | } | ||
1374 | } | ||
1375 | |||
1376 | static int | ||
1377 | mii_read (struct net_device *dev, int phy_addr, int reg_num) | ||
1378 | { | ||
1379 | u32 cmd; | ||
1380 | int i; | ||
1381 | u32 retval = 0; | ||
1382 | |||
1383 | /* Preamble */ | ||
1384 | mii_send_bits (dev, 0xffffffff, 32); | ||
1385 | /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ | ||
1386 | /* ST,OP = 0110'b for read operation */ | ||
1387 | cmd = (0x06 << 10 | phy_addr << 5 | reg_num); | ||
1388 | mii_send_bits (dev, cmd, 14); | ||
1389 | /* Turnaround */ | ||
1390 | if (mii_getbit (dev)) | ||
1391 | goto err_out; | ||
1392 | /* Read data */ | ||
1393 | for (i = 0; i < 16; i++) { | ||
1394 | retval |= mii_getbit (dev); | ||
1395 | retval <<= 1; | ||
1396 | } | ||
1397 | /* End cycle */ | ||
1398 | mii_getbit (dev); | ||
1399 | return (retval >> 1) & 0xffff; | ||
1400 | |||
1401 | err_out: | ||
1402 | return 0; | ||
1403 | } | ||
/*
 * Write a 16-bit PHY register over the bit-banged MDIO bus (clause-22
 * write frame; the whole 32-bit frame is driven by us, so no turnaround
 * handoff is needed).  Always returns 0.
 */
static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}
1419 | static int | ||
1420 | mii_wait_link (struct net_device *dev, int wait) | ||
1421 | { | ||
1422 | __u16 bmsr; | ||
1423 | int phy_addr; | ||
1424 | struct netdev_private *np; | ||
1425 | |||
1426 | np = netdev_priv(dev); | ||
1427 | phy_addr = np->phy_addr; | ||
1428 | |||
1429 | do { | ||
1430 | bmsr = mii_read (dev, phy_addr, MII_BMSR); | ||
1431 | if (bmsr & MII_BMSR_LINK_STATUS) | ||
1432 | return 0; | ||
1433 | mdelay (1); | ||
1434 | } while (--wait > 0); | ||
1435 | return -1; | ||
1436 | } | ||
/*
 * Decode the copper PHY's link parameters into np->speed, np->full_duplex
 * and the flow-control flags.  With autoneg enabled, the result is the
 * intersection of our advertisement and the partner's; otherwise the
 * forced settings are read back from BMCR.  Returns 0, or -1 if
 * autonegotiation has not completed yet.
 */
static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		/* Common 10/100 abilities of both link partners. */
		negotiate = mii_read (dev, phy_addr, MII_ANAR) &
			mii_read (dev, phy_addr, MII_ANLPAR);
		/* Gigabit abilities live in the master-slave registers. */
		mscr = mii_read (dev, phy_addr, MII_MSCR);
		mssr = mii_read (dev, phy_addr, MII_MSSR);
		if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & MII_ANAR_100BX_FD) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & MII_ANAR_100BX_HD) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & MII_ANAR_10BT_FD) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & MII_ANAR_10BT_HD) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		/* Symmetric pause: keep the user's flow-control choices;
		   asymmetric only: transmit pause is forced off. */
		if (negotiate & MII_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & MII_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		/* Forced mode: report what BMCR is configured for. */
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (MII_BMCR_SPEED_100 | MII_BMCR_SPEED_1000)) {
		case MII_BMCR_SPEED_1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case MII_BMCR_SPEED_100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & MII_BMCR_DUPLEX_MODE) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}
1522 | |||
/*
 * Program the copper PHY.  With autoneg enabled, advertise every ability
 * BMSR says the PHY supports (plus pause), enable auto-crossover, and
 * restart negotiation.  Otherwise run the vendor's forced-mode sequence:
 * disable crossover, reset, power down, clear the advertisement, then
 * power up with the requested speed/duplex.  The ordering of these
 * register writes is part of the procedure -- do not reorder.
 * Always returns 0.
 */
static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		/* Clear all 10/100 ability bits, then re-set those BMSR
		   reports as supported. */
		anar = mii_read (dev, phy_addr, MII_ANAR) &
			~MII_ANAR_100BX_FD &
			~MII_ANAR_100BX_HD &
			~MII_ANAR_100BT4 &
			~MII_ANAR_10BT_FD &
			~MII_ANAR_10BT_HD;
		if (bmsr & MII_BMSR_100BX_FD)
			anar |= MII_ANAR_100BX_FD;
		if (bmsr & MII_BMSR_100BX_HD)
			anar |= MII_ANAR_100BX_HD;
		if (bmsr & MII_BMSR_100BT4)
			anar |= MII_ANAR_100BT4;
		if (bmsr & MII_BMSR_10BT_FD)
			anar |= MII_ANAR_10BT_FD;
		if (bmsr & MII_BMSR_10BT_HD)
			anar |= MII_ANAR_10BT_HD;
		anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ANAR, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);

		/* 5) Set media and Power Up */
		bmcr = MII_BMCR_POWER_DOWN;
		if (np->speed == 100) {
			bmcr |= MII_BMCR_SPEED_100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= MII_BMCR_DUPLEX_MODE;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		/* NOTE: this disabled block is not valid C (the last line
		   would not compile); kept verbatim since it is #if 0'd out. */
		mscr = mii_read (dev, phy_addr, MII_MSCR);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}
1613 | |||
1614 | static int | ||
1615 | mii_get_media_pcs (struct net_device *dev) | ||
1616 | { | ||
1617 | __u16 negotiate; | ||
1618 | __u16 bmsr; | ||
1619 | int phy_addr; | ||
1620 | struct netdev_private *np; | ||
1621 | |||
1622 | np = netdev_priv(dev); | ||
1623 | phy_addr = np->phy_addr; | ||
1624 | |||
1625 | bmsr = mii_read (dev, phy_addr, PCS_BMSR); | ||
1626 | if (np->an_enable) { | ||
1627 | if (!(bmsr & MII_BMSR_AN_COMPLETE)) { | ||
1628 | /* Auto-Negotiation not completed */ | ||
1629 | return -1; | ||
1630 | } | ||
1631 | negotiate = mii_read (dev, phy_addr, PCS_ANAR) & | ||
1632 | mii_read (dev, phy_addr, PCS_ANLPAR); | ||
1633 | np->speed = 1000; | ||
1634 | if (negotiate & PCS_ANAR_FULL_DUPLEX) { | ||
1635 | printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); | ||
1636 | np->full_duplex = 1; | ||
1637 | } else { | ||
1638 | printk (KERN_INFO "Auto 1000 Mbps, half duplex\n"); | ||
1639 | np->full_duplex = 0; | ||
1640 | } | ||
1641 | if (negotiate & PCS_ANAR_PAUSE) { | ||
1642 | np->tx_flow &= 1; | ||
1643 | np->rx_flow &= 1; | ||
1644 | } else if (negotiate & PCS_ANAR_ASYMMETRIC) { | ||
1645 | np->tx_flow = 0; | ||
1646 | np->rx_flow &= 1; | ||
1647 | } | ||
1648 | /* else tx_flow, rx_flow = user select */ | ||
1649 | } else { | ||
1650 | __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); | ||
1651 | printk (KERN_INFO "Operating at 1000 Mbps, "); | ||
1652 | if (bmcr & MII_BMCR_DUPLEX_MODE) { | ||
1653 | printk (KERN_CONT "Full duplex\n"); | ||
1654 | } else { | ||
1655 | printk (KERN_CONT "Half duplex\n"); | ||
1656 | } | ||
1657 | } | ||
1658 | if (np->tx_flow) | ||
1659 | printk(KERN_INFO "Enable Tx Flow Control\n"); | ||
1660 | else | ||
1661 | printk(KERN_INFO "Disable Tx Flow Control\n"); | ||
1662 | if (np->rx_flow) | ||
1663 | printk(KERN_INFO "Enable Rx Flow Control\n"); | ||
1664 | else | ||
1665 | printk(KERN_INFO "Disable Rx Flow Control\n"); | ||
1666 | |||
1667 | return 0; | ||
1668 | } | ||
1669 | |||
1670 | static int | ||
1671 | mii_set_media_pcs (struct net_device *dev) | ||
1672 | { | ||
1673 | __u16 bmcr; | ||
1674 | __u16 esr; | ||
1675 | __u16 anar; | ||
1676 | int phy_addr; | ||
1677 | struct netdev_private *np; | ||
1678 | np = netdev_priv(dev); | ||
1679 | phy_addr = np->phy_addr; | ||
1680 | |||
1681 | /* Auto-Negotiation? */ | ||
1682 | if (np->an_enable) { | ||
1683 | /* Advertise capabilities */ | ||
1684 | esr = mii_read (dev, phy_addr, PCS_ESR); | ||
1685 | anar = mii_read (dev, phy_addr, MII_ANAR) & | ||
1686 | ~PCS_ANAR_HALF_DUPLEX & | ||
1687 | ~PCS_ANAR_FULL_DUPLEX; | ||
1688 | if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD)) | ||
1689 | anar |= PCS_ANAR_HALF_DUPLEX; | ||
1690 | if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD)) | ||
1691 | anar |= PCS_ANAR_FULL_DUPLEX; | ||
1692 | anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC; | ||
1693 | mii_write (dev, phy_addr, MII_ANAR, anar); | ||
1694 | |||
1695 | /* Soft reset PHY */ | ||
1696 | mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET); | ||
1697 | bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | | ||
1698 | MII_BMCR_RESET; | ||
1699 | mii_write (dev, phy_addr, MII_BMCR, bmcr); | ||
1700 | mdelay(1); | ||
1701 | } else { | ||
1702 | /* Force speed setting */ | ||
1703 | /* PHY Reset */ | ||
1704 | bmcr = MII_BMCR_RESET; | ||
1705 | mii_write (dev, phy_addr, MII_BMCR, bmcr); | ||
1706 | mdelay(10); | ||
1707 | if (np->full_duplex) { | ||
1708 | bmcr = MII_BMCR_DUPLEX_MODE; | ||
1709 | printk (KERN_INFO "Manual full duplex\n"); | ||
1710 | } else { | ||
1711 | bmcr = 0; | ||
1712 | printk (KERN_INFO "Manual half duplex\n"); | ||
1713 | } | ||
1714 | mii_write (dev, phy_addr, MII_BMCR, bmcr); | ||
1715 | mdelay(10); | ||
1716 | |||
1717 | /* Advertise nothing */ | ||
1718 | mii_write (dev, phy_addr, MII_ANAR, 0); | ||
1719 | } | ||
1720 | return 0; | ||
1721 | } | ||
1722 | |||
1723 | |||
1724 | static int | ||
1725 | rio_close (struct net_device *dev) | ||
1726 | { | ||
1727 | long ioaddr = dev->base_addr; | ||
1728 | struct netdev_private *np = netdev_priv(dev); | ||
1729 | struct sk_buff *skb; | ||
1730 | int i; | ||
1731 | |||
1732 | netif_stop_queue (dev); | ||
1733 | |||
1734 | /* Disable interrupts */ | ||
1735 | writew (0, ioaddr + IntEnable); | ||
1736 | |||
1737 | /* Stop Tx and Rx logics */ | ||
1738 | writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); | ||
1739 | |||
1740 | free_irq (dev->irq, dev); | ||
1741 | del_timer_sync (&np->timer); | ||
1742 | |||
1743 | /* Free all the skbuffs in the queue. */ | ||
1744 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1745 | skb = np->rx_skbuff[i]; | ||
1746 | if (skb) { | ||
1747 | pci_unmap_single(np->pdev, | ||
1748 | desc_to_dma(&np->rx_ring[i]), | ||
1749 | skb->len, PCI_DMA_FROMDEVICE); | ||
1750 | dev_kfree_skb (skb); | ||
1751 | np->rx_skbuff[i] = NULL; | ||
1752 | } | ||
1753 | np->rx_ring[i].status = 0; | ||
1754 | np->rx_ring[i].fraginfo = 0; | ||
1755 | } | ||
1756 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1757 | skb = np->tx_skbuff[i]; | ||
1758 | if (skb) { | ||
1759 | pci_unmap_single(np->pdev, | ||
1760 | desc_to_dma(&np->tx_ring[i]), | ||
1761 | skb->len, PCI_DMA_TODEVICE); | ||
1762 | dev_kfree_skb (skb); | ||
1763 | np->tx_skbuff[i] = NULL; | ||
1764 | } | ||
1765 | } | ||
1766 | |||
1767 | return 0; | ||
1768 | } | ||
1769 | |||
1770 | static void __devexit | ||
1771 | rio_remove1 (struct pci_dev *pdev) | ||
1772 | { | ||
1773 | struct net_device *dev = pci_get_drvdata (pdev); | ||
1774 | |||
1775 | if (dev) { | ||
1776 | struct netdev_private *np = netdev_priv(dev); | ||
1777 | |||
1778 | unregister_netdev (dev); | ||
1779 | pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, | ||
1780 | np->rx_ring_dma); | ||
1781 | pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, | ||
1782 | np->tx_ring_dma); | ||
1783 | #ifdef MEM_MAPPING | ||
1784 | iounmap ((char *) (dev->base_addr)); | ||
1785 | #endif | ||
1786 | free_netdev (dev); | ||
1787 | pci_release_regions (pdev); | ||
1788 | pci_disable_device (pdev); | ||
1789 | } | ||
1790 | pci_set_drvdata (pdev, NULL); | ||
1791 | } | ||
1792 | |||
/* PCI glue: binds the device IDs in rio_pci_tbl to probe/remove hooks. */
static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= __devexit_p(rio_remove1),
};
1799 | |||
/* Module entry point: register the PCI driver with the PCI core. */
static int __init
rio_init (void)
{
	return pci_register_driver(&rio_driver);
}
1805 | |||
/* Module exit point: unbind from all devices and unregister. */
static void __exit
rio_exit (void)
{
	pci_unregister_driver (&rio_driver);
}
1811 | |||
/* Hook the init/exit routines into the module loader. */
module_init (rio_init);
module_exit (rio_exit);
1814 | |||
1815 | /* | ||
1816 | |||
1817 | Compile command: | ||
1818 | |||
1819 | gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c | ||
1820 | |||
1821 | Read Documentation/networking/dl2k.txt for details. | ||
1822 | |||
1823 | */ | ||
1824 | |||
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h new file mode 100644 index 000000000000..7caab3d26a9e --- /dev/null +++ b/drivers/net/ethernet/dlink/dl2k.h | |||
@@ -0,0 +1,554 @@ | |||
1 | /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ | ||
2 | /* | ||
3 | Copyright (c) 2001, 2002 by D-Link Corporation | ||
4 | Written by Edward Peng.<edward_peng@dlink.com.tw> | ||
5 | Created 03-May-2001, base on Linux' sundance.c. | ||
6 | |||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of the GNU General Public License as published by | ||
9 | the Free Software Foundation; either version 2 of the License, or | ||
10 | (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef __DL2K_H__ | ||
14 | #define __DL2K_H__ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/crc32.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <asm/processor.h> /* Processor type for cache alignment. */ | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/time.h> | ||
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
#define RX_RING_SIZE	256
/* Byte sizes of the descriptor rings. Parenthesized so the expansion
   survives surrounding operators (the unparenthesized form broke
   precedence in expressions such as `x / TX_TOTAL_SIZE`). */
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
43 | |||
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */
#ifndef MEM_MAPPING
/* Without MEM_MAPPING, rewrite the MMIO accessors as port I/O so the
   rest of the driver can use readX/writeX unconditionally. */
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif
60 | |||
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
 */
/* Byte offsets into the chip's register window (I/O or memory mapped,
   see the readX/writeX remapping above). Register width is implied by
   the accessor used at each call site (writeb/writew/writel). */
enum dl2x_offsets {
	/* I/O register offsets */
	DMACtrl = 0x00,
	RxDMAStatus = 0x08,
	TFDListPtr0 = 0x10,
	TFDListPtr1 = 0x14,
	TxDMABurstThresh = 0x18,
	TxDMAUrgentThresh = 0x19,
	TxDMAPollPeriod = 0x1a,
	RFDListPtr0 = 0x1c,
	RFDListPtr1 = 0x20,
	RxDMABurstThresh = 0x24,
	RxDMAUrgentThresh = 0x25,
	RxDMAPollPeriod = 0x26,
	RxDMAIntCtrl = 0x28,
	DebugCtrl = 0x2c,
	ASICCtrl = 0x30,
	FifoCtrl = 0x38,
	RxEarlyThresh = 0x3a,
	FlowOffThresh = 0x3c,
	FlowOnThresh = 0x3e,
	TxStartThresh = 0x44,
	EepromData = 0x48,
	EepromCtrl = 0x4a,
	ExpromAddr = 0x4c,
	Exprodata = 0x50,
	WakeEvent = 0x51,
	CountDown = 0x54,
	IntStatusAck = 0x5a,
	IntEnable = 0x5c,
	IntStatus = 0x5e,
	TxStatus = 0x60,
	MACCtrl = 0x6c,
	VLANTag = 0x70,
	PhyCtrl = 0x76,
	StationAddr0 = 0x78,
	StationAddr1 = 0x7a,
	StationAddr2 = 0x7c,
	VLANId = 0x80,
	MaxFrameSize = 0x86,
	ReceiveMode = 0x88,
	HashTable0 = 0x8c,
	HashTable1 = 0x90,
	RmonStatMask = 0x98,
	StatMask = 0x9c,
	RxJumboFrames = 0xbc,
	TCPCheckSumErrors = 0xc0,
	IPCheckSumErrors = 0xc2,
	UDPCheckSumErrors = 0xc4,
	TxJumboFrames = 0xf4,
	/* Ethernet MIB statistic register offsets */
	OctetRcvOk = 0xa8,
	McstOctetRcvOk = 0xac,
	BcstOctetRcvOk = 0xb0,
	FramesRcvOk = 0xb4,
	McstFramesRcvdOk = 0xb8,
	BcstFramesRcvdOk = 0xbe,
	MacControlFramesRcvd = 0xc6,
	FrameTooLongErrors = 0xc8,
	InRangeLengthErrors = 0xca,
	FramesCheckSeqErrors = 0xcc,
	FramesLostRxErrors = 0xce,
	OctetXmtOk = 0xd0,
	McstOctetXmtOk = 0xd4,
	BcstOctetXmtOk = 0xd8,
	FramesXmtOk = 0xdc,
	McstFramesXmtdOk = 0xe0,
	FramesWDeferredXmt = 0xe4,
	LateCollisions = 0xe8,
	MultiColFrames = 0xec,
	SingleColFrames = 0xf0,
	BcstFramesXmtdOk = 0xf6,
	CarrierSenseErrors = 0xf8,
	MacControlFramesXmtd = 0xfa,
	FramesAbortXSColls = 0xfc,
	FramesWEXDeferal = 0xfe,
	/* RMON statistic register offsets */
	EtherStatsCollisions = 0x100,
	EtherStatsOctetsTransmit = 0x104,
	EtherStatsPktsTransmit = 0x108,
	EtherStatsPkts64OctetTransmit = 0x10c,
	EtherStats65to127OctetsTransmit = 0x110,
	EtherStatsPkts128to255OctetsTransmit = 0x114,
	EtherStatsPkts256to511OctetsTransmit = 0x118,
	EtherStatsPkts512to1023OctetsTransmit = 0x11c,
	EtherStatsPkts1024to1518OctetsTransmit = 0x120,
	EtherStatsCRCAlignErrors = 0x124,
	EtherStatsUndersizePkts = 0x128,
	EtherStatsFragments = 0x12c,
	EtherStatsJabbers = 0x130,
	EtherStatsOctets = 0x134,
	EtherStatsPkts = 0x138,
	EtherStats64Octets = 0x13c,
	EtherStatsPkts65to127Octets = 0x140,
	EtherStatsPkts128to255Octets = 0x144,
	EtherStatsPkts256to511Octets = 0x148,
	EtherStatsPkts512to1023Octets = 0x14c,
	EtherStatsPkts1024to1518Octets = 0x150,
};
168 | |||
/* Bits in the interrupt status/mask registers. */
/* Shared layout for IntStatus, IntStatusAck and IntEnable. */
enum IntStatus_bits {
	InterruptStatus = 0x0001,
	HostError = 0x0002,
	MACCtrlFrame = 0x0008,
	TxComplete = 0x0004,
	RxComplete = 0x0010,
	RxEarly = 0x0020,
	IntRequested = 0x0040,
	UpdateStats = 0x0080,
	LinkEvent = 0x0100,
	TxDMAComplete = 0x0200,
	RxDMAComplete = 0x0400,
	RFDListEnd = 0x0800,
	RxDMAPriority = 0x1000,
};
185 | |||
/* Bits in the ReceiveMode register. */
/* Receive-filter selection; multiple bits may be combined. */
enum ReceiveMode_bits {
	ReceiveUnicast = 0x0001,
	ReceiveMulticast = 0x0002,
	ReceiveBroadcast = 0x0004,
	ReceiveAllFrames = 0x0008,	/* promiscuous mode */
	ReceiveMulticastHash = 0x0010,
	ReceiveIPMulticast = 0x0020,
	ReceiveVLANMatch = 0x0100,
	ReceiveVLANHash = 0x0200,
};
/* Bits in MACCtrl. */
/* The *Enable/*Disable pairs are write-only commands; the matching
   *Enabled bits report current state when MACCtrl is read back. */
enum MACCtrl_bits {
	DuplexSelect = 0x20,
	TxFlowControlEnable = 0x80,
	RxFlowControlEnable = 0x0100,
	RcvFCS = 0x200,
	AutoVLANtagging = 0x1000,
	AutoVLANuntagging = 0x2000,
	StatsEnable = 0x00200000,
	StatsDisable = 0x00400000,
	StatsEnabled = 0x00800000,
	TxEnable = 0x01000000,
	TxDisable = 0x02000000,
	TxEnabled = 0x04000000,
	RxEnable = 0x08000000,
	RxDisable = 0x10000000,
	RxEnabled = 0x20000000,
};
215 | |||
/* ASICCtrl is accessed as two 16-bit halves; these are the low-word
   and high-word bit definitions respectively. */
enum ASICCtrl_LoWord_bits {
	PhyMedia = 0x0080,	/* set when the port is fiber, not copper */
};

enum ASICCtrl_HiWord_bits {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,	/* reads back set while a reset is in progress */
};
230 | |||
/* Transmit Frame Control bits */
/* Fields of the 64-bit Tx descriptor control word; VIDShift and
   UsePriorityShift are bit positions in the upper 32 bits. */
enum TFC_bits {
	DwordAlign = 0x00000000,
	WordAlignDisable = 0x00030000,
	WordAlign = 0x00020000,
	TCPChecksumEnable = 0x00040000,
	UDPChecksumEnable = 0x00080000,
	IPChecksumEnable = 0x00100000,
	FCSAppendDisable = 0x00200000,
	TxIndicate = 0x00400000,
	TxDMAIndicate = 0x00800000,
	FragCountShift = 24,
	VLANTagInsert = 0x0000000010000000,	/* value is 0x10000000; extra zeros are padding only */
	TFDDone = 0x80000000,
	VIDShift = 32,
	UsePriorityShift = 48,
};
248 | |||
/* Receive Frames Status bits */
/* Fields of the 64-bit Rx descriptor status word; RFS_Errors masks
   all the error bits at once, TCIShift indexes the upper 32 bits. */
enum RFS_bits {
	RxFIFOOverrun = 0x00010000,
	RxRuntFrame = 0x00020000,
	RxAlignmentError = 0x00040000,
	RxFCSError = 0x00080000,
	RxOverSizedFrame = 0x00100000,
	RxLengthError = 0x00200000,
	VLANDetected = 0x00400000,
	TCPDetected = 0x00800000,
	TCPError = 0x01000000,
	UDPDetected = 0x02000000,
	UDPError = 0x04000000,
	IPDetected = 0x08000000,
	IPError = 0x10000000,
	FrameStart = 0x20000000,
	FrameEnd = 0x40000000,
	RFDDone = 0x80000000,
	TCIShift = 32,
	RFS_Errors = 0x003f0000,
};
270 | |||
#define MII_RESET_TIME_OUT	10000	/* poll iterations waiting for PHY reset */
/* MII register */
/* Register addresses on the MII management interface (copper PHY). */
enum _mii_reg {
	MII_BMCR = 0,
	MII_BMSR = 1,
	MII_PHY_ID1 = 2,
	MII_PHY_ID2 = 3,
	MII_ANAR = 4,
	MII_ANLPAR = 5,
	MII_ANER = 6,
	MII_ANNPT = 7,
	MII_ANLPRNP = 8,
	MII_MSCR = 9,
	MII_MSSR = 10,
	MII_ESR = 15,
	MII_PHY_SCR = 16,
};
/* PCS register */
/* Same management interface, fiber (PCS) port register addresses. */
enum _pcs_reg {
	PCS_BMCR = 0,
	PCS_BMSR = 1,
	PCS_ANAR = 4,
	PCS_ANLPAR = 5,
	PCS_ANER = 6,
	PCS_ANNPT = 7,
	PCS_ANLPRNP = 8,
	PCS_ESR = 15,
};
299 | |||
/* Basic Mode Control Register */
/* The speed is encoded across two non-adjacent bits (LSB at 0x2000,
   MSB at 0x0040); the SPEED_10/100/1000 aliases spell out the combos. */
enum _mii_bmcr {
	MII_BMCR_RESET = 0x8000,
	MII_BMCR_LOOP_BACK = 0x4000,
	MII_BMCR_SPEED_LSB = 0x2000,
	MII_BMCR_AN_ENABLE = 0x1000,
	MII_BMCR_POWER_DOWN = 0x0800,
	MII_BMCR_ISOLATE = 0x0400,
	MII_BMCR_RESTART_AN = 0x0200,
	MII_BMCR_DUPLEX_MODE = 0x0100,
	MII_BMCR_COL_TEST = 0x0080,
	MII_BMCR_SPEED_MSB = 0x0040,
	MII_BMCR_SPEED_RESERVED = 0x003f,
	MII_BMCR_SPEED_10 = 0,
	MII_BMCR_SPEED_100 = MII_BMCR_SPEED_LSB,
	MII_BMCR_SPEED_1000 = MII_BMCR_SPEED_MSB,
};
317 | |||
/* Basic Mode Status Register */
enum _mii_bmsr {
	MII_BMSR_100BT4 = 0x8000,
	MII_BMSR_100BX_FD = 0x4000,
	MII_BMSR_100BX_HD = 0x2000,
	MII_BMSR_10BT_FD = 0x1000,
	MII_BMSR_10BT_HD = 0x0800,
	MII_BMSR_100BT2_FD = 0x0400,
	MII_BMSR_100BT2_HD = 0x0200,
	MII_BMSR_EXT_STATUS = 0x0100,
	MII_BMSR_PREAMBLE_SUPP = 0x0040,
	MII_BMSR_AN_COMPLETE = 0x0020,
	MII_BMSR_REMOTE_FAULT = 0x0010,
	MII_BMSR_AN_ABILITY = 0x0008,
	MII_BMSR_LINK_STATUS = 0x0004,
	MII_BMSR_JABBER_DETECT = 0x0002,
	MII_BMSR_EXT_CAP = 0x0001,
};
336 | |||
/* ANAR */
/* Auto-Negotiation Advertisement Register bits. */
enum _mii_anar {
	MII_ANAR_NEXT_PAGE = 0x8000,
	MII_ANAR_REMOTE_FAULT = 0x4000,
	MII_ANAR_ASYMMETRIC = 0x0800,
	MII_ANAR_PAUSE = 0x0400,
	MII_ANAR_100BT4 = 0x0200,
	MII_ANAR_100BX_FD = 0x0100,
	MII_ANAR_100BX_HD = 0x0080,
	MII_ANAR_10BT_FD = 0x0020,
	MII_ANAR_10BT_HD = 0x0010,
	MII_ANAR_SELECTOR = 0x001f,
	MII_IEEE8023_CSMACD = 0x0001,
};

/* ANLPAR */
/* Link Partner Ability Register: same layout as ANAR. */
enum _mii_anlpar {
	MII_ANLPAR_NEXT_PAGE = MII_ANAR_NEXT_PAGE,
	MII_ANLPAR_REMOTE_FAULT = MII_ANAR_REMOTE_FAULT,
	MII_ANLPAR_ASYMMETRIC = MII_ANAR_ASYMMETRIC,
	MII_ANLPAR_PAUSE = MII_ANAR_PAUSE,
	MII_ANLPAR_100BT4 = MII_ANAR_100BT4,
	MII_ANLPAR_100BX_FD = MII_ANAR_100BX_FD,
	MII_ANLPAR_100BX_HD = MII_ANAR_100BX_HD,
	MII_ANLPAR_10BT_FD = MII_ANAR_10BT_FD,
	MII_ANLPAR_10BT_HD = MII_ANAR_10BT_HD,
	MII_ANLPAR_SELECTOR = MII_ANAR_SELECTOR,
};
365 | |||
/* Auto-Negotiation Expansion Register */
enum _mii_aner {
	MII_ANER_PAR_DETECT_FAULT = 0x0010,
	MII_ANER_LP_NEXTPAGABLE = 0x0008,
	MII_ANER_NETXTPAGABLE = 0x0004,
	MII_ANER_PAGE_RECEIVED = 0x0002,
	MII_ANER_LP_NEGOTIABLE = 0x0001,
};
374 | |||
/* MASTER-SLAVE Control Register */
/* 1000BASE-T master/slave configuration and gigabit advertisement. */
enum _mii_mscr {
	MII_MSCR_TEST_MODE = 0xe000,
	MII_MSCR_CFG_ENABLE = 0x1000,
	MII_MSCR_CFG_VALUE = 0x0800,
	MII_MSCR_PORT_VALUE = 0x0400,
	MII_MSCR_1000BT_FD = 0x0200,
	MII_MSCR_1000BT_HD = 0x0100,	/* was "0X0100": lowercase prefix for consistency */
};
384 | |||
/* MASTER-SLAVE Status Register */
enum _mii_mssr {
	MII_MSSR_CFG_FAULT = 0x8000,
	MII_MSSR_CFG_RES = 0x4000,
	MII_MSSR_LOCAL_RCV_STATUS = 0x2000,
	MII_MSSR_REMOTE_RCVR = 0x1000,
	MII_MSSR_LP_1000BT_FD = 0x0800,
	MII_MSSR_LP_1000BT_HD = 0x0400,
	MII_MSSR_IDLE_ERR_COUNT = 0x00ff,
};

/* IEEE Extended Status Register */
enum _mii_esr {
	MII_ESR_1000BX_FD = 0x8000,
	MII_ESR_1000BX_HD = 0x4000,
	MII_ESR_1000BT_FD = 0x2000,
	MII_ESR_1000BT_HD = 0x1000,
};
/* PHY Specific Control Register */
/* Dead code: this bitfield view of MII_PHY_SCR is compiled out; the
   driver manipulates the register with shifts/masks instead. */
#if 0
typedef union t_MII_PHY_SCR {
	u16 image;
	struct {
		u16 disable_jabber:1;		// bit 0
		u16 polarity_reversal:1;	// bit 1
		u16 SEQ_test:1;			// bit 2
		u16 _bit_3:1;			// bit 3
		u16 disable_CLK125:1;		// bit 4
		u16 mdi_crossover_mode:2;	// bit 6:5
		u16 enable_ext_dist:1;		// bit 7
		u16 _bit_8_9:2;			// bit 9:8
		u16 force_link:1;		// bit 10
		u16 assert_CRS:1;		// bit 11
		u16 rcv_fifo_depth:2;		// bit 13:12
		u16 xmit_fifo_depth:2;		// bit 15:14
	} bits;
} PHY_SCR_t, *PPHY_SCR_t;
#endif

/* Administrative states a PHY can be placed in via BMCR. */
typedef enum t_MII_ADMIN_STATUS {
	adm_reset,
	adm_operational,
	adm_loopback,
	adm_power_down,
	adm_isolate
} MII_ADMIN_t, *PMII_ADMIN_t;
431 | |||
/* Physical Coding Sublayer Management (PCS) */
/* PCS control and status registers bitmap as the same as MII */
/* PCS Extended Status register bitmap as the same as MII */
/* PCS ANAR */
enum _pcs_anar {
	PCS_ANAR_NEXT_PAGE = 0x8000,
	PCS_ANAR_REMOTE_FAULT = 0x3000,
	PCS_ANAR_ASYMMETRIC = 0x0100,
	PCS_ANAR_PAUSE = 0x0080,
	PCS_ANAR_HALF_DUPLEX = 0x0040,
	PCS_ANAR_FULL_DUPLEX = 0x0020,
};
/* PCS ANLPAR */
/* Link partner ability: same layout as PCS ANAR. */
enum _pcs_anlpar {
	PCS_ANLPAR_NEXT_PAGE = PCS_ANAR_NEXT_PAGE,
	PCS_ANLPAR_REMOTE_FAULT = PCS_ANAR_REMOTE_FAULT,
	PCS_ANLPAR_ASYMMETRIC = PCS_ANAR_ASYMMETRIC,
	PCS_ANLPAR_PAUSE = PCS_ANAR_PAUSE,
	PCS_ANLPAR_HALF_DUPLEX = PCS_ANAR_HALF_DUPLEX,
	PCS_ANLPAR_FULL_DUPLEX = PCS_ANAR_FULL_DUPLEX,
};
453 | |||
/* On-board serial EEPROM layout (offsets in the trailing comments). */
typedef struct t_SROM {
	u16 config_param;	/* 0x00 */
	u16 asic_ctrl;		/* 0x02 */
	u16 sub_vendor_id;	/* 0x04 */
	u16 sub_system_id;	/* 0x06 */
	u16 reserved1[12];	/* 0x08-0x1f */
	u8 mac_addr[6];		/* 0x20-0x25 */
	u8 reserved2[10];	/* 0x26-0x2f */
	u8 sib[204];		/* 0x30-0xfb */
	u32 crc;		/* 0xfc-0xff */
} SROM_t, *PSROM_t;

/* Ioctl custom data */
/* Wire format of the driver's private ioctl payload. */
struct ioctl_data {
	char signature[10];
	int cmd;
	int len;
	char *data;
};

/* MII register read/write request passed through the private ioctl. */
struct mii_data {
	__u16 reserved;
	__u16 reg_num;
	__u16 in_value;
	__u16 out_value;
};
480 | |||
/* The Rx and Tx buffer descriptors. */
/* Hardware descriptor: three little-endian 64-bit words (chain pointer,
   status/control, fragment address+length). */
struct netdev_desc {
	__le64 next_desc;
	__le64 status;
	__le64 fraginfo;
};
487 | |||
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
/* Per-device driver state, reached via netdev_priv(dev). */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct pci_dev *pdev;
	spinlock_t tx_lock;
	spinlock_t rx_lock;
	struct net_device_stats stats;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int speed;		/* Operating speed */
	unsigned int vlan;		/* VLAN Id */
	unsigned int chip_id;		/* PCI table chip id */
	unsigned int rx_coalesce;	/* Maximum frames each RxDMAComplete intr */
	unsigned int rx_timeout;	/* Wait time between RxDMAComplete intr */
	unsigned int tx_coalesce;	/* Maximum frames each tx interrupt */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int an_enable:2;	/* Auto-Negotiated Enable */
	unsigned int jumbo:1;		/* Jumbo frame enable */
	unsigned int coalesce:1;	/* Rx coalescing enable */
	unsigned int tx_flow:1;		/* Tx flow control enable */
	unsigned int rx_flow:1;		/* Rx flow control enable */
	unsigned int phy_media:1;	/* 1: fiber, 0: copper */
	unsigned int link_status:1;	/* Current link status */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned long cur_rx, old_rx;	/* Producer/consumer ring indices */
	unsigned long cur_tx, old_tx;
	struct timer_list timer;
	int wake_polarity;
	char name[256];		/* net device description */
	u8 duplex_polarity;
	u16 mcast_filter[4];
	u16 advertising;	/* NWay media advertisement */
	u16 negotiate;		/* Negotiated media */
	int phy_addr;		/* PHY addresses. */
};
530 | |||
/* The station address location in the EEPROM. */
/* The struct pci_device_id consist of:
	vendor, device		Vendor and device ID to match (or PCI_ANY_ID)
	subvendor, subdevice	Subsystem vendor and device ID to match (or PCI_ANY_ID)
	class			Device class to match. The class_mask tells which bits
	class_mask		of the class are honored during the comparison.
	driver_data		Data private to the driver.
*/

/* Supported devices: 0x1186 is D-Link; 0x13f0:0x1021 is a rebadged
   board handled by the same chip (presumably -- verify against probe). */
static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
	{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
	{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
	{ }
};
MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
/* Driver-wide tunables. */
#define TX_TIMEOUT  (4*HZ)	/* Tx watchdog timeout */
#define PACKET_SIZE		1536	/* standard Rx buffer size */
#define MAX_JUMBO		8000	/* largest supported jumbo frame */
#define RIO_IO_SIZE		340	/* register window size in bytes */
#define DEFAULT_RXC		5	/* default rx_coalesce */
#define DEFAULT_RXT		750	/* default rx_timeout */
#define DEFAULT_TXC		1	/* default tx_coalesce */
#define MAX_TXC			8	/* upper bound for tx_coalesce */
#endif /* __DL2K_H__ */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c new file mode 100644 index 000000000000..4793df843c24 --- /dev/null +++ b/drivers/net/ethernet/dlink/sundance.c | |||
@@ -0,0 +1,1940 @@ | |||
1 | /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ | ||
2 | /* | ||
3 | Written 1999-2000 by Donald Becker. | ||
4 | |||
5 | This software may be used and distributed according to the terms of | ||
6 | the GNU General Public License (GPL), incorporated herein by reference. | ||
7 | Drivers based on or derived from this code fall under the GPL and must | ||
8 | retain the authorship, copyright and license notice. This file is not | ||
9 | a complete program and may only be used when the entire operating | ||
10 | system is licensed under the GPL. | ||
11 | |||
12 | The author may be reached as becker@scyld.com, or C/O | ||
13 | Scyld Computing Corporation | ||
14 | 410 Severn Ave., Suite 210 | ||
15 | Annapolis MD 21403 | ||
16 | |||
17 | Support and updates available at | ||
18 | http://www.scyld.com/network/sundance.html | ||
19 | [link no longer provides useful info -jgarzik] | ||
20 | Archives of the mailing list are still available at | ||
21 | http://www.beowulf.org/pipermail/netdrivers/ | ||
22 | |||
23 | */ | ||
24 | |||
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl=1;		/* advertise 802.3x flow control by default */

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];	/* per-card media override strings */


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
79 | |||
80 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ | ||
81 | #include <linux/module.h> | ||
82 | #include <linux/kernel.h> | ||
83 | #include <linux/string.h> | ||
84 | #include <linux/timer.h> | ||
85 | #include <linux/errno.h> | ||
86 | #include <linux/ioport.h> | ||
87 | #include <linux/interrupt.h> | ||
88 | #include <linux/pci.h> | ||
89 | #include <linux/netdevice.h> | ||
90 | #include <linux/etherdevice.h> | ||
91 | #include <linux/skbuff.h> | ||
92 | #include <linux/init.h> | ||
93 | #include <linux/bitops.h> | ||
94 | #include <asm/uaccess.h> | ||
95 | #include <asm/processor.h> /* Processor type for cache alignment. */ | ||
96 | #include <asm/io.h> | ||
97 | #include <linux/delay.h> | ||
98 | #include <linux/spinlock.h> | ||
99 | #include <linux/dma-mapping.h> | ||
100 | #include <linux/crc32.h> | ||
101 | #include <linux/ethtool.h> | ||
102 | #include <linux/mii.h> | ||
103 | |||
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; the tunables they expose are defined above. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120 | |||
121 | /* | ||
122 | Theory of Operation | ||
123 | |||
124 | I. Board Compatibility | ||
125 | |||
126 | This driver is designed for the Sundance Technologies "Alta" ST201 chip. | ||
127 | |||
128 | II. Board-specific settings | ||
129 | |||
130 | III. Driver operation | ||
131 | |||
132 | IIIa. Ring buffers | ||
133 | |||
134 | This driver uses two statically allocated fixed-size descriptor lists | ||
135 | formed into rings by a branch from the final descriptor to the beginning of | ||
136 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. | ||
137 | Some chips explicitly use only 2^N sized rings, while others use a | ||
138 | 'next descriptor' pointer that the driver forms into rings. | ||
139 | |||
140 | IIIb/c. Transmit/Receive Structure | ||
141 | |||
142 | This driver uses a zero-copy receive and transmit scheme. | ||
143 | The driver allocates full frame size skbuffs for the Rx ring buffers at | ||
144 | open() time and passes the skb->data field to the chip as receive data | ||
145 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, | ||
146 | a fresh skbuff is allocated and the frame is copied to the new skbuff. | ||
147 | When the incoming frame is larger, the skbuff is passed directly up the | ||
148 | protocol stack. Buffers consumed this way are replaced by newly allocated | ||
149 | skbuffs in a later phase of receives. | ||
150 | |||
151 | The RX_COPYBREAK value is chosen to trade-off the memory wasted by | ||
152 | using a full-sized skbuff for small frames vs. the copying costs of larger | ||
153 | frames. New boards are typically used in generously configured machines | ||
154 | and the underfilled buffers have negligible impact compared to the benefit of | ||
155 | a single allocation size, so the default value of zero results in never | ||
156 | copying packets. When copying is done, the cost is usually mitigated by using | ||
157 | a combined copy/checksum routine. Copying also preloads the cache, which is | ||
158 | most useful with small frames. | ||
159 | |||
160 | A subtle aspect of the operation is that the IP header at offset 14 in an | ||
161 | ethernet frame isn't longword aligned for further processing. | ||
162 | Unaligned buffers are permitted by the Sundance hardware, so | ||
163 | frames are received into the skbuff at an offset of "+2", 16-byte aligning | ||
164 | the IP header. | ||
165 | |||
166 | IIId. Synchronization | ||
167 | |||
168 | The driver runs as two independent, single-threaded flows of control. One | ||
169 | is the send-packet routine, which enforces single-threaded use by the | ||
170 | dev->tbusy flag. The other thread is the interrupt handler, which is single | ||
171 | threaded by the hardware and interrupt handling software. | ||
172 | |||
173 | The send packet thread has partial control over the Tx ring and 'dev->tbusy' | ||
174 | flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next | ||
175 | queue slot is empty, it clears the tbusy flag when finished otherwise it sets | ||
176 | the 'lp->tx_full' flag. | ||
177 | |||
178 | The interrupt handler has exclusive control over the Rx ring and records stats | ||
179 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as | ||
180 | empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it | ||
181 | clears both the tx_full and tbusy flags. | ||
182 | |||
183 | IV. Notes | ||
184 | |||
185 | IVb. References | ||
186 | |||
187 | The Sundance ST201 datasheet, preliminary version. | ||
188 | The Kendin KS8723 datasheet, preliminary version. | ||
189 | The ICplus IP100 datasheet, preliminary version. | ||
190 | http://www.scyld.com/expert/100mbps.html | ||
191 | http://www.scyld.com/expert/NWay.html | ||
192 | |||
193 | IVc. Errata | ||
194 | |||
195 | */ | ||
196 | |||
/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

/* PCI IDs this driver claims.  The final field (driver_data) is the
   board index into pci_id_tbl[] below. */
static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128	/* bytes of register space mapped by pci_iomap() */
};
217 | |||
/* Human-readable board names, indexed by the driver_data field of
   sundance_pci_tbl[] (used only for the probe-time banner). */
struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};
231 | |||
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,	/* NOTE: same offset as LEDCtrl below */
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,	/* 32-bit; reset bits live in the high word */
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,		/* bit-banged MDIO port, see mdio_read/write */
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
297 | |||
/* Offset from a 32-bit register to its high 16-bit half. */
#define ASIC_HI_WORD(x)	((x) + 2)

/* Reset-control bits in the high word of ASICCtrl (see sundance_reset()). */
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,	/* set while the chip is still resetting */
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;	/* bus address of the next descriptor in the ring */
	__le32 status;		/* desc_status_bits below */
	struct desc_frag { __le32 addr, length; } frag[1];	/* one buffer fragment */
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
355 | |||
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4	/* max number of MII PHY addresses remembered */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;		/* RX_RING_SIZE descriptors */
	struct netdev_desc *tx_ring;		/* TX_RING_SIZE descriptors */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];	/* skb backing each Rx slot */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];	/* skb queued on each Tx slot */
	dma_addr_t tx_ring_dma;			/* bus address of tx_ring */
	dma_addr_t rx_ring_dma;			/* bus address of rx_ring */
	struct timer_list timer;		/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;				/* netif_msg_* mask, derived from 'debug' */
	int chip_id;				/* index into pci_id_tbl[]/sundance_pci_tbl[] */
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;		/* advertise 802.3x flow control */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;		/* autonegotiation enabled */
	unsigned int speed;			/* forced speed when !an_enable */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;		/* >0: send sync preamble before MDIO ops */
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;			/* mapped register window */
	spinlock_t statlock;
};
410 | |||
411 | /* The station address location in the EEPROM. */ | ||
412 | #define EEPROM_SA_OFFSET 0x10 | ||
413 | #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ | ||
414 | IntrDrvRqst | IntrTxDone | StatsMax | \ | ||
415 | LinkChange) | ||
416 | |||
417 | static int change_mtu(struct net_device *dev, int new_mtu); | ||
418 | static int eeprom_read(void __iomem *ioaddr, int location); | ||
419 | static int mdio_read(struct net_device *dev, int phy_id, int location); | ||
420 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | ||
421 | static int mdio_wait_link(struct net_device *dev, int wait); | ||
422 | static int netdev_open(struct net_device *dev); | ||
423 | static void check_duplex(struct net_device *dev); | ||
424 | static void netdev_timer(unsigned long data); | ||
425 | static void tx_timeout(struct net_device *dev); | ||
426 | static void init_ring(struct net_device *dev); | ||
427 | static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); | ||
428 | static int reset_tx (struct net_device *dev); | ||
429 | static irqreturn_t intr_handler(int irq, void *dev_instance); | ||
430 | static void rx_poll(unsigned long data); | ||
431 | static void tx_poll(unsigned long data); | ||
432 | static void refill_rx (struct net_device *dev); | ||
433 | static void netdev_error(struct net_device *dev, int intr_status); | ||
434 | static void netdev_error(struct net_device *dev, int intr_status); | ||
435 | static void set_rx_mode(struct net_device *dev); | ||
436 | static int __set_mac_addr(struct net_device *dev); | ||
437 | static int sundance_set_mac_addr(struct net_device *dev, void *data); | ||
438 | static struct net_device_stats *get_stats(struct net_device *dev); | ||
439 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
440 | static int netdev_close(struct net_device *dev); | ||
441 | static const struct ethtool_ops ethtool_ops; | ||
442 | |||
/* Assert the reset bits given in @reset_cmd (ASICCtrl high-word bits,
 * already shifted into the high 16 bits by the caller) and busy-wait
 * for the chip to clear ResetBusy.  Gives up, with a warning, after
 * ~1.1 ms. */
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;	/* 11 polls x 100 us */
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
461 | |||
/* net_device_ops dispatch table: the hooks this driver implements. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
474 | |||
/* Probe one PCI device: map its register window, read the station
 * address from the EEPROM, allocate the coherent descriptor rings,
 * register the net device, locate the MII PHY, apply any 'media' /
 * 'flowctrl' module-parameter overrides, and reset the chip.
 * Returns 0 on success or a negative errno. */
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;		/* counts probed boards, indexes media[] */
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;			/* I/O-port BAR */
#else
	int bar = 1;			/* memory-mapped BAR */
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	/* Station address: three little-endian 16-bit words from the EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	/* Coherent DMA memory for the Tx and Rx descriptor rings. */
	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some phys doesn't deal well with address 0 being accessed
	 * first
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	/* Scan the MDIO bus for responding PHYs (0x0000/0xffff = no PHY). */
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;	/* unrecognized: autosense */
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

	/* Unwind in reverse order of acquisition. */
err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
694 | |||
695 | static int change_mtu(struct net_device *dev, int new_mtu) | ||
696 | { | ||
697 | if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */ | ||
698 | return -EINVAL; | ||
699 | if (netif_running(dev)) | ||
700 | return -EBUSY; | ||
701 | dev->mtu = new_mtu; | ||
702 | return 0; | ||
703 | } | ||
704 | |||
#define eeprom_delay(ee_addr)	ioread32(ee_addr)	/* read-back acts as a delay */
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
/* Start a read of EEPROM word @location and poll until the busy bit
 * (0x8000 in EECtrl) clears, then return the word from EEData.
 * Returns 0 on timeout, which is indistinguishable from real 0 data. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;   /* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);	/* 0x0200: read command */
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
719 | |||
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back 33Mhz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)		/* dummy read paces the MDIO clock */

/* Bit positions in the MIICtrl register. */
enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)		/* output driver off: let the PHY drive MDIO */
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
735 | |||
736 | /* Generate the preamble required for initial synchronization and | ||
737 | a few older transceivers. */ | ||
738 | static void mdio_sync(void __iomem *mdio_addr) | ||
739 | { | ||
740 | int bits = 32; | ||
741 | |||
742 | /* Establish sync by sending at least 32 logic ones. */ | ||
743 | while (--bits >= 0) { | ||
744 | iowrite8(MDIO_WRITE1, mdio_addr); | ||
745 | mdio_delay(); | ||
746 | iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); | ||
747 | mdio_delay(); | ||
748 | } | ||
749 | } | ||
750 | |||
/* Read MII register @location of PHY @phy_id by bit-banging the MDIO
 * frame through the MIICtrl register.  Returns the 16-bit value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	/* 0xf6 << 10: header (start + read opcode) of the MDIO read frame */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Drop the trailing idle bit and mask down to 16 data bits. */
	return (retval>>1) & 0xffff;
}
780 | |||
/* Write @value to MII register @location of PHY @phy_id by bit-banging
 * the full 32-bit MDIO write frame through the MIICtrl register. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	/* 0x5002 << 16: frame header (start + write opcode + turnaround) */
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out, MSB first. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
808 | |||
809 | static int mdio_wait_link(struct net_device *dev, int wait) | ||
810 | { | ||
811 | int bmsr; | ||
812 | int phy_id; | ||
813 | struct netdev_private *np; | ||
814 | |||
815 | np = netdev_priv(dev); | ||
816 | phy_id = np->phys[0]; | ||
817 | |||
818 | do { | ||
819 | bmsr = mdio_read(dev, phy_id, MII_BMSR); | ||
820 | if (bmsr & 0x0004) | ||
821 | return 0; | ||
822 | mdelay(1); | ||
823 | } while (--wait > 0); | ||
824 | return -1; | ||
825 | } | ||
826 | |||
827 | static int netdev_open(struct net_device *dev) | ||
828 | { | ||
829 | struct netdev_private *np = netdev_priv(dev); | ||
830 | void __iomem *ioaddr = np->base; | ||
831 | unsigned long flags; | ||
832 | int i; | ||
833 | |||
834 | /* Do we need to reset the chip??? */ | ||
835 | |||
836 | i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); | ||
837 | if (i) | ||
838 | return i; | ||
839 | |||
840 | if (netif_msg_ifup(np)) | ||
841 | printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", | ||
842 | dev->name, dev->irq); | ||
843 | init_ring(dev); | ||
844 | |||
845 | iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); | ||
846 | /* The Tx list pointer is written as packets are queued. */ | ||
847 | |||
848 | /* Initialize other registers. */ | ||
849 | __set_mac_addr(dev); | ||
850 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
851 | iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); | ||
852 | #else | ||
853 | iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); | ||
854 | #endif | ||
855 | if (dev->mtu > 2047) | ||
856 | iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); | ||
857 | |||
858 | /* Configure the PCI bus bursts and FIFO thresholds. */ | ||
859 | |||
860 | if (dev->if_port == 0) | ||
861 | dev->if_port = np->default_port; | ||
862 | |||
863 | spin_lock_init(&np->mcastlock); | ||
864 | |||
865 | set_rx_mode(dev); | ||
866 | iowrite16(0, ioaddr + IntrEnable); | ||
867 | iowrite16(0, ioaddr + DownCounter); | ||
868 | /* Set the chip to poll every N*320nsec. */ | ||
869 | iowrite8(100, ioaddr + RxDMAPollPeriod); | ||
870 | iowrite8(127, ioaddr + TxDMAPollPeriod); | ||
871 | /* Fix DFE-580TX packet drop issue */ | ||
872 | if (np->pci_dev->revision >= 0x14) | ||
873 | iowrite8(0x01, ioaddr + DebugCtrl1); | ||
874 | netif_start_queue(dev); | ||
875 | |||
876 | spin_lock_irqsave(&np->lock, flags); | ||
877 | reset_tx(dev); | ||
878 | spin_unlock_irqrestore(&np->lock, flags); | ||
879 | |||
880 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); | ||
881 | |||
882 | if (netif_msg_ifup(np)) | ||
883 | printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " | ||
884 | "MAC Control %x, %4.4x %4.4x.\n", | ||
885 | dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), | ||
886 | ioread32(ioaddr + MACCtrl0), | ||
887 | ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); | ||
888 | |||
889 | /* Set the timer to check for link beat. */ | ||
890 | init_timer(&np->timer); | ||
891 | np->timer.expires = jiffies + 3*HZ; | ||
892 | np->timer.data = (unsigned long)dev; | ||
893 | np->timer.function = netdev_timer; /* timer handler */ | ||
894 | add_timer(&np->timer); | ||
895 | |||
896 | /* Enable interrupts by setting the interrupt mask. */ | ||
897 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
/*
 * Set the MAC duplex mode to match the MII state: either the forced
 * configuration (autoneg off or PHY returned 0xffff), or the result
 * of autonegotiation with the link partner.
 */
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	/* Abilities common to us and the link partner. */
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				   ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	/* Full duplex if 100FULL (0x0100) was negotiated, or 10FULL
	   (0x0040) is the only common 10/100 ability in mask 0x01C0. */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		/* 0x20 appears to be the same MACCtrl0 full-duplex bit as
		   EnbFullDuplex above — TODO confirm against the datasheet. */
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}
929 | |||
930 | static void netdev_timer(unsigned long data) | ||
931 | { | ||
932 | struct net_device *dev = (struct net_device *)data; | ||
933 | struct netdev_private *np = netdev_priv(dev); | ||
934 | void __iomem *ioaddr = np->base; | ||
935 | int next_tick = 10*HZ; | ||
936 | |||
937 | if (netif_msg_timer(np)) { | ||
938 | printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " | ||
939 | "Tx %x Rx %x.\n", | ||
940 | dev->name, ioread16(ioaddr + IntrEnable), | ||
941 | ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); | ||
942 | } | ||
943 | check_duplex(dev); | ||
944 | np->timer.expires = jiffies + next_tick; | ||
945 | add_timer(&np->timer); | ||
946 | } | ||
947 | |||
/*
 * Transmit watchdog: the stack saw no Tx completions in time.
 * Quiesce the Tx path (queue, tasklet, interrupts), dump the Tx ring
 * for diagnosis, hard-reset the Tx engine, then re-enable everything.
 */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	/* Keep tx_poll() from racing with the reset below. */
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	/* Dump every Tx descriptor plus the ring indices for debugging. */
	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes . */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	/* Ring was emptied by reset_tx(); let the stack transmit again. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
998 | |||
999 | |||
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	/* One buffer per packet; add slack beyond the MTU for large frames. */
	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors: chain next_desc pointers into a
	   circular list (DMA addresses), clear status and lengths. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully:
	   stop at the first skb or DMA-mapping failure and leave the rest
	   empty (dirty_rx below records the shortfall for refill_rx()). */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	/* Negative (mod ring size) when not all buffers were filled. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
1047 | |||
/*
 * Tx tasklet: link newly queued descriptors (cur_task..cur_tx) into the
 * hardware's descriptor chain and kick the Tx list pointer if the DMA
 * engine is idle.  @data is the net_device pointer.
 */
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	/* Default to the most recently queued descriptor in case the
	   chaining loop below has nothing new to process. */
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	/* TxListPtr == 0 means the Tx DMA engine is idle; restart it at
	   the first descriptor we just chained. */
	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}
1073 | |||
/*
 * ndo_start_xmit: map the skb for DMA, fill the next Tx descriptor and
 * hand it to tx_poll() via the tasklet.  Always returns NETDEV_TX_OK;
 * an unmappable skb is dropped and counted in tx_dropped.
 */
static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	/* Descriptor status encodes the frame id (entry) in bits 2..9. */
	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev,
				txdesc->frag[0].addr))
			goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule().  The mb() orders the
	   descriptor writes above against tx_poll() reading cur_tx. */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
			!netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		/* Ring is (nearly) full: stop the stack until completions. */
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
1121 | |||
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff: unmap DMA, release the skbs and count the
	   in-flight frames we are throwing away as dropped. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	/* Ring is now empty: reset all software indices. */
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	/* Same Tx poll period as programmed in netdev_open(). */
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
1158 | |||
/* The interrupt handler cleans up after the Tx thread,
   and schedule a Rx thread work */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;


	/* do { } while (0): single pass with 'break' used as early exit. */
	do {
		/* Read and acknowledge the interrupt sources in one go. */
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		/* Not ours (shared IRQ line) or spurious. */
		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		/* Rx work: mask further Rx interrupts and defer the actual
		   receive processing to the rx_poll() tasklet. */
		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			/* Drain queued Tx status words; bit 0x80 means a
			   status entry is valid.  tx_cnt bounds the loop. */
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
				     dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					/* Too many statuses in a row: back off
					   and let the down-counter re-trigger. */
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			/* Completed frame id lives in the high status byte. */
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			/* Newer silicon: reap by comparing each descriptor's
			   software frame id with the hardware's last id. */
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			/* Older silicon: reap on the descriptor's own
			   completion bit (status & 0x00010000) only. */
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1306 | |||
/*
 * Rx tasklet: process up to np->budget received frames, hand them to
 * the stack, then either re-enable Rx interrupts (done) or reschedule
 * itself (budget exhausted).  @data is the net_device pointer.
 */
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		/* Budget exhausted: stop here and reschedule. */
		if (--boguscnt < 0) {
			goto not_done;
		}
		/* DescOwn clear means the hardware hasn't filled it yet. */
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was a error.  Classify by status bits; the
			   skb is kept in the ring and reused on refill. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Copy out of the still-mapped ring buffer;
				   the original skb stays in the ring. */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				/* Hand the ring skb itself up; refill_rx()
				   will allocate a replacement later. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	/* All pending frames handled: unmask Rx interrupts again. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	/* More frames pending: run again rather than re-enabling the IRQ. */
	tasklet_schedule(&np->rx_tasklet);
}
1401 | |||
/*
 * Re-stock the Rx ring: allocate and DMA-map a fresh skb for every
 * slot consumed by rx_poll(), and hand the descriptors back to the
 * hardware.  Allocation failure just stops early; the next call
 * (or the next rx_poll()) retries.
 */
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
			    dev_kfree_skb_irq(skb);
			    np->rx_skbuff[entry] = NULL;
			    break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
}
/*
 * Handle the "abnormal" interrupt sources: link change (report speed
 * and duplex, update carrier state, program flow control), statistics
 * counters near overflow, and PCI bus errors.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		/* Wait up to 10 ms for the PHY to confirm link-up. */
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				/* Negotiated abilities = our advertisement
				   ANDed with the link partner's. */
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				/* Autoneg off: read the forced settings
				   straight from the PHY control register. */
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps ,",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			/* Enable pause frames when full duplex + flowctrl. */
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		/* Harvest the hardware counters before they overflow. */
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
1501 | |||
/*
 * Read the hardware statistics registers and accumulate them into
 * dev->stats and the driver's extra np->xstats counters.
 * NOTE(review): the counters appear to be read-to-clear (values are
 * always added, never replaced) — confirm against the datasheet;
 * statlock serializes readers so no increment is counted twice.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only need report frame silently dropped. */
	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	/* Aggregate collision count for the standard stats. */
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	/* Octet counters are split across two 16-bit registers. */
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}
1543 | |||
/*
 * Program the receive filter: promiscuous, all-multicast, a 64-bit
 * multicast hash table, or unicast/broadcast only, depending on the
 * interface flags and the multicast list length.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* Build a 6-bit hash index from the top 6 bits of
			   the little-endian CRC-32 of the MAC address. */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		/* Empty multicast list: unicast + broadcast only; the hash
		   table registers are left untouched. */
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	/* Flow control: accept pause frames via the last filter word. */
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
1584 | |||
1585 | static int __set_mac_addr(struct net_device *dev) | ||
1586 | { | ||
1587 | struct netdev_private *np = netdev_priv(dev); | ||
1588 | u16 addr16; | ||
1589 | |||
1590 | addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); | ||
1591 | iowrite16(addr16, np->base + StationAddr); | ||
1592 | addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); | ||
1593 | iowrite16(addr16, np->base + StationAddr+2); | ||
1594 | addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); | ||
1595 | iowrite16(addr16, np->base + StationAddr+4); | ||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | /* Invoked with rtnl_lock held */ | ||
1600 | static int sundance_set_mac_addr(struct net_device *dev, void *data) | ||
1601 | { | ||
1602 | const struct sockaddr *addr = data; | ||
1603 | |||
1604 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1605 | return -EINVAL; | ||
1606 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | ||
1607 | __set_mac_addr(dev); | ||
1608 | |||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
/* Statistic names reported for "ethtool -S".
 * NOTE: the order of entries here must match the order in which
 * get_ethtool_stats() fills the data array.
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};
1626 | |||
1627 | static int check_if_running(struct net_device *dev) | ||
1628 | { | ||
1629 | if (!netif_running(dev)) | ||
1630 | return -EINVAL; | ||
1631 | return 0; | ||
1632 | } | ||
1633 | |||
1634 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1635 | { | ||
1636 | struct netdev_private *np = netdev_priv(dev); | ||
1637 | strcpy(info->driver, DRV_NAME); | ||
1638 | strcpy(info->version, DRV_VERSION); | ||
1639 | strcpy(info->bus_info, pci_name(np->pci_dev)); | ||
1640 | } | ||
1641 | |||
/* ethtool: read the current link settings via the generic MII helper. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	/* np->lock serializes MII access against the interrupt/timer paths. */
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
1650 | |||
1651 | static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
1652 | { | ||
1653 | struct netdev_private *np = netdev_priv(dev); | ||
1654 | int res; | ||
1655 | spin_lock_irq(&np->lock); | ||
1656 | res = mii_ethtool_sset(&np->mii_if, ecmd); | ||
1657 | spin_unlock_irq(&np->lock); | ||
1658 | return res; | ||
1659 | } | ||
1660 | |||
/* ethtool: restart PHY autonegotiation. */
static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}
1666 | |||
/* ethtool: report link-up status from the MII layer. */
static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}
1672 | |||
/* ethtool: return the driver's message-level bitmask. */
static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}
1678 | |||
/* ethtool: set the driver's message-level bitmask. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
1684 | |||
/* ethtool: copy out the statistics name table for ETH_SS_STATS;
 * other string sets are silently ignored.
 */
static void get_strings(struct net_device *dev, u32 stringset,
		u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}
1691 | |||
1692 | static int get_sset_count(struct net_device *dev, int sset) | ||
1693 | { | ||
1694 | switch (sset) { | ||
1695 | case ETH_SS_STATS: | ||
1696 | return ARRAY_SIZE(sundance_stats); | ||
1697 | default: | ||
1698 | return -EOPNOTSUPP; | ||
1699 | } | ||
1700 | } | ||
1701 | |||
/* ethtool -S: refresh the hardware counters and copy them out.
 * NOTE: the order of the assignments below must match the order of
 * the names in sundance_stats[].
 */
static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	/* get_stats() pulls the chip counters into np->xstats. */
	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}
1720 | |||
/* ethtool entry points. .begin runs before every operation and
 * rejects requests while the interface is down.
 */
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
};
1734 | |||
1735 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1736 | { | ||
1737 | struct netdev_private *np = netdev_priv(dev); | ||
1738 | int rc; | ||
1739 | |||
1740 | if (!netif_running(dev)) | ||
1741 | return -EINVAL; | ||
1742 | |||
1743 | spin_lock_irq(&np->lock); | ||
1744 | rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); | ||
1745 | spin_unlock_irq(&np->lock); | ||
1746 | |||
1747 | return rc; | ||
1748 | } | ||
1749 | |||
/* ndo_stop: quiesce the hardware and release all Rx/Tx resources.
 * Order matters: kill the tasklets first so no softirq touches the
 * rings during teardown, then mask interrupts, stop DMA, reset the
 * chip, and only afterwards free the IRQ and the ring buffers.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA for safely release resource */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Poll (up to ~2 ms) for in-flight DMA to drain. */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	/* Full chip reset so the device is left in a known state. */
	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	/* Poll (up to ~2 ms) for the reset to complete. */
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG " Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG " Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	/* Unmap and free any skbs still pending on the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
1849 | |||
1850 | static void __devexit sundance_remove1 (struct pci_dev *pdev) | ||
1851 | { | ||
1852 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1853 | |||
1854 | if (dev) { | ||
1855 | struct netdev_private *np = netdev_priv(dev); | ||
1856 | unregister_netdev(dev); | ||
1857 | dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, | ||
1858 | np->rx_ring, np->rx_ring_dma); | ||
1859 | dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, | ||
1860 | np->tx_ring, np->tx_ring_dma); | ||
1861 | pci_iounmap(pdev, np->base); | ||
1862 | pci_release_regions(pdev); | ||
1863 | free_netdev(dev); | ||
1864 | pci_set_drvdata(pdev, NULL); | ||
1865 | } | ||
1866 | } | ||
1867 | |||
1868 | #ifdef CONFIG_PM | ||
1869 | |||
/* PCI suspend: stop the interface, detach it from the stack, save PCI
 * config space and drop the device into the requested low-power state.
 */
static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	/* Nothing to do if the interface is already down. */
	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}
1885 | |||
1886 | static int sundance_resume(struct pci_dev *pci_dev) | ||
1887 | { | ||
1888 | struct net_device *dev = pci_get_drvdata(pci_dev); | ||
1889 | int err = 0; | ||
1890 | |||
1891 | if (!netif_running(dev)) | ||
1892 | return 0; | ||
1893 | |||
1894 | pci_set_power_state(pci_dev, PCI_D0); | ||
1895 | pci_restore_state(pci_dev); | ||
1896 | |||
1897 | err = netdev_open(dev); | ||
1898 | if (err) { | ||
1899 | printk(KERN_ERR "%s: Can't resume interface!\n", | ||
1900 | dev->name); | ||
1901 | goto out; | ||
1902 | } | ||
1903 | |||
1904 | netif_device_attach(dev); | ||
1905 | |||
1906 | out: | ||
1907 | return err; | ||
1908 | } | ||
1909 | |||
1910 | #endif /* CONFIG_PM */ | ||
1911 | |||
/* PCI driver glue; the suspend/resume hooks are compiled in only when
 * power-management support is configured.
 */
static struct pci_driver sundance_driver = {
	.name = DRV_NAME,
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = __devexit_p(sundance_remove1),
#ifdef CONFIG_PM
	.suspend = sundance_suspend,
	.resume = sundance_resume,
#endif /* CONFIG_PM */
};
1922 | |||
/* Module entry point: print the version banner (module builds only)
 * and register the PCI driver.
 */
static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}
1931 | |||
/* Module exit point: unregister the PCI driver. */
static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}
1936 | |||
/* Register the module entry/exit points with the kernel. */
module_init(sundance_init);
module_exit(sundance_exit);
1939 | |||
1940 | |||