Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig            2
-rw-r--r--  drivers/net/mv643xx_eth.h     18
-rw-r--r--  drivers/net/pcnet32.c       4143
-rw-r--r--  drivers/net/skfp/fplustm.c    12
-rw-r--r--  drivers/net/skge.c           275
-rw-r--r--  drivers/net/skge.h             1
-rw-r--r--  drivers/net/sky2.c           583
-rw-r--r--  drivers/net/sky2.h            22
-rw-r--r--  drivers/net/smc91x.c          53
-rw-r--r--  drivers/net/smc91x.h         474
-rw-r--r--  drivers/net/tg3.c             32
11 files changed, 2973 insertions, 2642 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e0b11095b9da..00993e8ba589 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1914,7 +1914,7 @@ config E1000_DISABLE_PACKET_SPLIT
 	depends on E1000
 	help
 	  Say Y here if you want to use the legacy receive path for PCI express
-	  hadware.
+	  hardware.
 
 	  If in doubt, say N.
 
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 7754d1974b9e..4262c1da6d4a 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -42,13 +42,23 @@
 #define MAX_DESCS_PER_SKB	1
 #endif
 
+/*
+ * The MV643XX HW requires 8-byte alignment.  However, when I/O
+ * is non-cache-coherent, we need to ensure that the I/O buffers
+ * we use don't share cache lines with other data.
+ */
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
+#define ETH_DMA_ALIGN		L1_CACHE_BYTES
+#else
+#define ETH_DMA_ALIGN		8
+#endif
+
 #define ETH_VLAN_HLEN		4
 #define ETH_FCS_LEN		4
-#define ETH_DMA_ALIGN		8	/* hw requires 8-byte alignment */
 #define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
 #define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
 				ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE		((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7)
+#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
 
 #define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
 #define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */
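
The ETH_RX_SKB_SIZE change above drops the old round-up-to-8 arithmetic in favour of over-allocating by one full ETH_DMA_ALIGN unit, so the receive path can later push the start of the buffer up to a cache-line boundary without truncating a maximum-size frame. A minimal user-space sketch of that arithmetic follows; the helper names and the 32-byte cache line are illustrative assumptions, not part of the patch:

/* Sketch only: ETH_DMA_ALIGN of 32 stands in for L1_CACHE_BYTES on a
 * non-cache-coherent CPU; rx_skb_size_old/new are hypothetical helpers.
 */
#include <stdio.h>

#define ETH_HW_IP_ALIGN 2
#define ETH_HLEN        14
#define ETH_VLAN_HLEN   4
#define ETH_FCS_LEN     4
#define ETH_DMA_ALIGN   32

#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + ETH_VLAN_HLEN + ETH_FCS_LEN)

/* Old macro: round the total up to the 8-byte hardware minimum. */
static unsigned int rx_skb_size_old(unsigned int mtu)
{
	return (mtu + ETH_WRAPPER_LEN + 7) & ~0x7u;
}

/* New macro: add a full alignment unit of slack, so the buffer start can
 * be advanced to an ETH_DMA_ALIGN boundary and still hold a full frame.
 */
static unsigned int rx_skb_size_new(unsigned int mtu)
{
	return mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN;
}

int main(void)
{
	printf("mtu 1500: old %u bytes, new %u bytes\n",
	       rx_skb_size_old(1500), rx_skb_size_new(1500));
	return 0;
}

The extra ETH_DMA_ALIGN bytes cost a little memory per receive buffer but guarantee the DMA target never shares a cache line with unrelated data on non-coherent systems.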
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 7e900572eaf8..9595f74da93f 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,12 @@
  *************************************************************************/
 
 #define DRV_NAME	"pcnet32"
-#define DRV_VERSION	"1.31c"
-#define DRV_RELDATE	"01.Nov.2005"
+#define DRV_VERSION	"1.32"
+#define DRV_RELDATE	"18.Mar.2006"
 #define PFX		DRV_NAME ": "
 
-static const char * const version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+static const char *const version =
+    DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
  * PCI device identifiers for "new style" Linux PCI Device Drivers
  */
 static struct pci_device_id pcnet32_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	/*
-	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
-	 * the incorrect vendor id.
-	 */
-	{ PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID,
-	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 },
-	{ 0, }
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+
+	/*
+	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+	 * the incorrect vendor id.
+	 */
+	{ PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
+	  PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+
+	{ }	/* terminate list */
 };
 
-MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
 
 static int cards_found;
 
@@ -77,13 +82,11 @@ static int cards_found;
  * VLB I/O addresses
  */
 static unsigned int pcnet32_portlist[] __initdata =
     { 0x300, 0x320, 0x340, 0x360, 0 };
-
-
 
 static int pcnet32_debug = 0;
 static int tx_start = 1;	/* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
 static int pcnet32vlb;		/* check for VLB cards ? */
 
 static struct net_device *pcnet32_dev;
 
@@ -110,32 +113,34 @@ static int rx_copybreak = 200;
  * to internal options
  */
 static const unsigned char options_mapping[] = {
 	PCNET32_PORT_ASEL,	/* 0 Auto-select */
 	PCNET32_PORT_AUI,	/* 1 BNC/AUI */
 	PCNET32_PORT_AUI,	/* 2 AUI/BNC */
 	PCNET32_PORT_ASEL,	/* 3 not supported */
 	PCNET32_PORT_10BT | PCNET32_PORT_FD,	/* 4 10baseT-FD */
 	PCNET32_PORT_ASEL,	/* 5 not supported */
 	PCNET32_PORT_ASEL,	/* 6 not supported */
 	PCNET32_PORT_ASEL,	/* 7 not supported */
 	PCNET32_PORT_ASEL,	/* 8 not supported */
 	PCNET32_PORT_MII,	/* 9 MII 10baseT */
 	PCNET32_PORT_MII | PCNET32_PORT_FD,	/* 10 MII 10baseT-FD */
 	PCNET32_PORT_MII,	/* 11 MII (autosel) */
 	PCNET32_PORT_10BT,	/* 12 10BaseT */
 	PCNET32_PORT_MII | PCNET32_PORT_100,	/* 13 MII 100BaseTx */
-	PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
+	/* 14 MII 100BaseTx-FD */
+	PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
 	PCNET32_PORT_ASEL	/* 15 not supported */
 };
 
 static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Loopback test (offline)"
 };
+
 #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
 
-#define PCNET32_NUM_REGS 168
+#define PCNET32_NUM_REGS 136
 
 #define MAX_UNITS 8		/* More are supported, limit only on options */
 static int options[MAX_UNITS];
 static int full_duplex[MAX_UNITS];
 static int homepna[MAX_UNITS];
@@ -151,124 +156,6 @@ static int homepna[MAX_UNITS];
  */
 
 /*
- * History:
- * v0.01: Initial version
- * only tested on Alpha Noname Board
- * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
- * tested on a ASUS SP3G
- * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
- * looks like the 974 doesn't like stopping and restarting in a
- * short period of time; now we do a reinit of the lance; the
- * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
- * and hangs the machine (thanks to Klaus Liedl for debugging)
- * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
- * made it standalone (no need for lance.c)
- * v0.13: added additional PCI detecting for special PCI devices (Compaq)
- * v0.14: stripped down additional PCI probe (thanks to David C Niemi
- * and sveneric@xs4all.nl for testing this on their Compaq boxes)
- * v0.15: added 79C965 (VLB) probe
- * added interrupt sharing for PCI chips
- * v0.16: fixed set_multicast_list on Alpha machines
- * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
- * v0.19: changed setting of autoselect bit
- * v0.20: removed additional Compaq PCI probe; there is now a working one
- * in arch/i386/bios32.c
- * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
- * v0.22: added printing of status to ring dump
- * v0.23: changed enet_statistics to net_devive_stats
- * v0.90: added multicast filter
- * added module support
- * changed irq probe to new style
- * added PCnetFast chip id
- * added fix for receive stalls with Intel saturn chipsets
- * added in-place rx skbs like in the tulip driver
- * minor cleanups
- * v0.91: added PCnetFast+ chip id
- * back port to 2.0.x
- * v1.00: added some stuff from Donald Becker's 2.0.34 version
- * added support for byte counters in net_dev_stats
- * v1.01: do ring dumps, only when debugging the driver
- * increased the transmit timeout
- * v1.02: fixed memory leak in pcnet32_init_ring()
- * v1.10: workaround for stopped transmitter
- * added port selection for modules
- * detect special T1/E1 WAN card and setup port selection
- * v1.11: fixed wrong checking of Tx errors
- * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
- * added save original kmalloc addr for freeing (mcr@solidum.com)
- * added support for PCnetHome chip (joe@MIT.EDU)
- * rewritten PCI card detection
- * added dwio mode to get driver working on some PPC machines
- * v1.21: added mii selection and mii ioctl
- * v1.22: changed pci scanning code to make PPC people happy
- * fixed switching to 32bit mode in pcnet32_open() (thanks
- * to Michael Richard <mcr@solidum.com> for noticing this one)
- * added sub vendor/device id matching (thanks again to
- * Michael Richard <mcr@solidum.com>)
- * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
- * v1.23 fixed small bug, when manual selecting MII speed/duplex
- * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
- * underflows. Added tx_start_pt module parameter. Increased
- * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
- * for FAST[+] chipsets. <kaf@fc.hp.com>
- * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
- * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
- * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
- * <jamey@crl.dec.com>
- * - Fixed a few bugs, related to running the controller in 32bit mode.
- * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
- * v1.27 improved CSR/PROM address detection, lots of cleanups,
- * new pcnet32vlb module option, HP-PARISC support,
- * added module parameter descriptions,
- * initial ethtool support - Helge Deller <deller@gmx.de>
- * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
- * use alloc_etherdev and register_netdev
- * fix pci probe not increment cards_found
- * FD auto negotiate error workaround for xSeries250
- * clean up and using new mii module
- * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
- * Added timer for cable connection state changes.
- * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com>
- * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com>
- * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl.
- * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single
- * length errors, and transmit hangs. Cleans up after errors in open.
- * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test.
- * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections.
- * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical
- * identification code (blink led's) and register dump.
- * Don Fry added timer for 971/972 so skbufs don't remain on tx ring
- * forever.
- * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt
- * (ltint) as they added complexity and didn't give good throughput.
- * v1.30a 22 May 2004 Don Fry limit frames received during interrupt.
- * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973,
- * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>.
- * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart.
- * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets.
- * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang.
- * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI,
- * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>,
- * and Brian Murphy <brian@murphy.dk>.
- * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option
- * homepna for selecting HomePNA mode for PCNet/Home 79C978.
- * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
- * v1.30i 28 Jun 2004 Don Fry change to use module_param.
- * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
- * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
- * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
- * to allow loopback test to work unchanged.
- * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device
- * if allocation fails
- * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
- * Force 100Mbit FD if Auto (ASEL) is selected.
- * See Bugzilla 2669 and 4551.
- */
-
-
-/*
  * Set the number of Tx and Rx buffers, using Log_2(# buffers).
  * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
  * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
303 190
304/* The PCNET32 Rx and Tx ring descriptors. */ 191/* The PCNET32 Rx and Tx ring descriptors. */
305struct pcnet32_rx_head { 192struct pcnet32_rx_head {
306 u32 base; 193 u32 base;
307 s16 buf_length; 194 s16 buf_length;
308 s16 status; 195 s16 status;
309 u32 msg_length; 196 u32 msg_length;
310 u32 reserved; 197 u32 reserved;
311}; 198};
312 199
313struct pcnet32_tx_head { 200struct pcnet32_tx_head {
314 u32 base; 201 u32 base;
315 s16 length; 202 s16 length;
316 s16 status; 203 s16 status;
317 u32 misc; 204 u32 misc;
318 u32 reserved; 205 u32 reserved;
319}; 206};
320 207
321/* The PCNET32 32-Bit initialization block, described in databook. */ 208/* The PCNET32 32-Bit initialization block, described in databook. */
322struct pcnet32_init_block { 209struct pcnet32_init_block {
323 u16 mode; 210 u16 mode;
324 u16 tlen_rlen; 211 u16 tlen_rlen;
325 u8 phys_addr[6]; 212 u8 phys_addr[6];
326 u16 reserved; 213 u16 reserved;
327 u32 filter[2]; 214 u32 filter[2];
328 /* Receive and transmit ring base, along with extra bits. */ 215 /* Receive and transmit ring base, along with extra bits. */
329 u32 rx_ring; 216 u32 rx_ring;
330 u32 tx_ring; 217 u32 tx_ring;
331}; 218};
332 219
333/* PCnet32 access functions */ 220/* PCnet32 access functions */
334struct pcnet32_access { 221struct pcnet32_access {
335 u16 (*read_csr)(unsigned long, int); 222 u16 (*read_csr) (unsigned long, int);
336 void (*write_csr)(unsigned long, int, u16); 223 void (*write_csr) (unsigned long, int, u16);
337 u16 (*read_bcr)(unsigned long, int); 224 u16 (*read_bcr) (unsigned long, int);
338 void (*write_bcr)(unsigned long, int, u16); 225 void (*write_bcr) (unsigned long, int, u16);
339 u16 (*read_rap)(unsigned long); 226 u16 (*read_rap) (unsigned long);
340 void (*write_rap)(unsigned long, u16); 227 void (*write_rap) (unsigned long, u16);
341 void (*reset)(unsigned long); 228 void (*reset) (unsigned long);
342}; 229};
343 230
344/* 231/*
@@ -346,760 +233,794 @@ struct pcnet32_access {
  * so the structure should be allocated using pci_alloc_consistent().
  */
 struct pcnet32_private {
 	struct pcnet32_init_block init_block;
 	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
 	struct pcnet32_rx_head *rx_ring;
 	struct pcnet32_tx_head *tx_ring;
-	dma_addr_t dma_addr;		/* DMA address of beginning of this
-					   object, returned by
-					   pci_alloc_consistent */
-	struct pci_dev *pci_dev;	/* Pointer to the associated pci device
-					   structure */
+	dma_addr_t dma_addr;	/* DMA address of beginning of this
+				   object, returned by pci_alloc_consistent */
+	struct pci_dev *pci_dev;
 	const char *name;
 	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 	struct sk_buff **tx_skbuff;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *tx_dma_addr;
 	dma_addr_t *rx_dma_addr;
 	struct pcnet32_access a;
 	spinlock_t lock;	/* Guard lock */
 	unsigned int cur_rx, cur_tx;	/* The next free ring entry */
 	unsigned int rx_ring_size;	/* current rx ring size */
 	unsigned int tx_ring_size;	/* current tx ring size */
 	unsigned int rx_mod_mask;	/* rx ring modular mask */
 	unsigned int tx_mod_mask;	/* tx ring modular mask */
 	unsigned short rx_len_bits;
 	unsigned short tx_len_bits;
 	dma_addr_t rx_ring_dma_addr;
 	dma_addr_t tx_ring_dma_addr;
-	unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+	unsigned int dirty_rx,	/* ring entries to be freed. */
+		dirty_tx;
+
 	struct net_device_stats stats;
 	char tx_full;
+	char phycount;		/* number of phys found */
 	int options;
 	unsigned int shared_irq:1,	/* shared irq possible */
 		dxsuflo:1,	/* disable transmit stop on uflo */
 		mii:1;		/* mii port available */
 	struct net_device *next;
 	struct mii_if_info mii_if;
 	struct timer_list watchdog_timer;
 	struct timer_list blink_timer;
 	u32 msg_enable;		/* debug message level */
+
+	/* each bit indicates an available PHY */
+	u32 phymask;
 };
 
 static void pcnet32_probe_vlbus(void);
 static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
 static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
 static int pcnet32_open(struct net_device *);
 static int pcnet32_init_ring(struct net_device *);
 static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
 static int pcnet32_rx(struct net_device *);
-static void pcnet32_tx_timeout (struct net_device *dev);
+static void pcnet32_tx_timeout(struct net_device *dev);
 static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
 static int pcnet32_close(struct net_device *);
 static struct net_device_stats *pcnet32_get_stats(struct net_device *);
 static void pcnet32_load_multicast(struct net_device *dev);
 static void pcnet32_set_multicast_list(struct net_device *);
 static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
 static void pcnet32_watchdog(struct net_device *);
 static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
-static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+		       int val);
 static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
 static void pcnet32_ethtool_test(struct net_device *dev,
-				 struct ethtool_test *eth_test, u64 *data);
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
+				 struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
 static int pcnet32_phys_id(struct net_device *dev, u32 data);
 static void pcnet32_led_blink_callback(struct net_device *dev);
 static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
 static int pcnet32_alloc_ring(struct net_device *dev, char *name);
 static void pcnet32_free_ring(struct net_device *dev);
-
+static void pcnet32_check_media(struct net_device *dev, int verbose);
 
 enum pci_flags_bit {
-	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
-	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+	PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
+	PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
+	    0x10 << 2, PCI_ADDR3 = 0x10 << 3,
 };
 
-
-static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
 {
-	outw (index, addr+PCNET32_WIO_RAP);
-	return inw (addr+PCNET32_WIO_RDP);
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_RDP);
 }
 
-static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
 {
-	outw (index, addr+PCNET32_WIO_RAP);
-	outw (val, addr+PCNET32_WIO_RDP);
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_RDP);
 }
 
-static u16 pcnet32_wio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
 {
-	outw (index, addr+PCNET32_WIO_RAP);
-	return inw (addr+PCNET32_WIO_BDP);
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_BDP);
 }
 
-static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
 {
-	outw (index, addr+PCNET32_WIO_RAP);
-	outw (val, addr+PCNET32_WIO_BDP);
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_BDP);
 }
 
-static u16 pcnet32_wio_read_rap (unsigned long addr)
+static u16 pcnet32_wio_read_rap(unsigned long addr)
 {
-	return inw (addr+PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_RAP);
 }
 
-static void pcnet32_wio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
 {
-	outw (val, addr+PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_RAP);
 }
 
-static void pcnet32_wio_reset (unsigned long addr)
+static void pcnet32_wio_reset(unsigned long addr)
 {
-	inw (addr+PCNET32_WIO_RESET);
+	inw(addr + PCNET32_WIO_RESET);
 }
 
-static int pcnet32_wio_check (unsigned long addr)
+static int pcnet32_wio_check(unsigned long addr)
 {
-	outw (88, addr+PCNET32_WIO_RAP);
-	return (inw (addr+PCNET32_WIO_RAP) == 88);
+	outw(88, addr + PCNET32_WIO_RAP);
+	return (inw(addr + PCNET32_WIO_RAP) == 88);
 }
 
 static struct pcnet32_access pcnet32_wio = {
 	.read_csr = pcnet32_wio_read_csr,
 	.write_csr = pcnet32_wio_write_csr,
 	.read_bcr = pcnet32_wio_read_bcr,
 	.write_bcr = pcnet32_wio_write_bcr,
 	.read_rap = pcnet32_wio_read_rap,
 	.write_rap = pcnet32_wio_write_rap,
 	.reset = pcnet32_wio_reset
 };
 
-static u16 pcnet32_dwio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
 {
-	outl (index, addr+PCNET32_DWIO_RAP);
-	return (inl (addr+PCNET32_DWIO_RDP) & 0xffff);
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
 }
 
-static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
 {
-	outl (index, addr+PCNET32_DWIO_RAP);
-	outl (val, addr+PCNET32_DWIO_RDP);
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_RDP);
 }
 
-static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
 {
-	outl (index, addr+PCNET32_DWIO_RAP);
-	return (inl (addr+PCNET32_DWIO_BDP) & 0xffff);
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
 }
 
-static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
 {
-	outl (index, addr+PCNET32_DWIO_RAP);
-	outl (val, addr+PCNET32_DWIO_BDP);
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_BDP);
 }
 
-static u16 pcnet32_dwio_read_rap (unsigned long addr)
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
 {
-	return (inl (addr+PCNET32_DWIO_RAP) & 0xffff);
+	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
 }
 
-static void pcnet32_dwio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
 {
-	outl (val, addr+PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_RAP);
 }
 
-static void pcnet32_dwio_reset (unsigned long addr)
+static void pcnet32_dwio_reset(unsigned long addr)
 {
-	inl (addr+PCNET32_DWIO_RESET);
+	inl(addr + PCNET32_DWIO_RESET);
 }
 
-static int pcnet32_dwio_check (unsigned long addr)
+static int pcnet32_dwio_check(unsigned long addr)
 {
-	outl (88, addr+PCNET32_DWIO_RAP);
-	return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88);
+	outl(88, addr + PCNET32_DWIO_RAP);
+	return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
 }
 
 static struct pcnet32_access pcnet32_dwio = {
 	.read_csr = pcnet32_dwio_read_csr,
 	.write_csr = pcnet32_dwio_write_csr,
 	.read_bcr = pcnet32_dwio_read_bcr,
 	.write_bcr = pcnet32_dwio_write_bcr,
 	.read_rap = pcnet32_dwio_read_rap,
 	.write_rap = pcnet32_dwio_write_rap,
 	.reset = pcnet32_dwio_reset
 };
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void pcnet32_poll_controller(struct net_device *dev)
 {
 	disable_irq(dev->irq);
 	pcnet32_interrupt(0, dev, NULL);
 	enable_irq(dev->irq);
 }
 #endif
 
-
 static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
 	int r = -EOPNOTSUPP;
 
 	if (lp->mii) {
 		spin_lock_irqsave(&lp->lock, flags);
 		mii_ethtool_gset(&lp->mii_if, cmd);
 		spin_unlock_irqrestore(&lp->lock, flags);
 		r = 0;
 	}
 	return r;
 }
 
 static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
 	int r = -EOPNOTSUPP;
 
 	if (lp->mii) {
 		spin_lock_irqsave(&lp->lock, flags);
 		r = mii_ethtool_sset(&lp->mii_if, cmd);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 	return r;
 }
 
-static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void pcnet32_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
 {
 	struct pcnet32_private *lp = dev->priv;
 
-	strcpy (info->driver, DRV_NAME);
-	strcpy (info->version, DRV_VERSION);
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
 	if (lp->pci_dev)
-		strcpy (info->bus_info, pci_name(lp->pci_dev));
+		strcpy(info->bus_info, pci_name(lp->pci_dev));
 	else
 		sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
 }
 
 static u32 pcnet32_get_link(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
 	int r;
-
-	spin_lock_irqsave(&lp->lock, flags);
-	if (lp->mii) {
-		r = mii_link_ok(&lp->mii_if);
-	} else {
-		ulong ioaddr = dev->base_addr;	/* card base I/O address */
-		r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
-	}
-	spin_unlock_irqrestore(&lp->lock, flags);
 
-	return r;
+	spin_lock_irqsave(&lp->lock, flags);
+	if (lp->mii) {
+		r = mii_link_ok(&lp->mii_if);
+	} else {
+		ulong ioaddr = dev->base_addr;	/* card base I/O address */
+		r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+	}
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	return r;
 }
 
 static u32 pcnet32_get_msglevel(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
 	return lp->msg_enable;
 }
 
 static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
 {
 	struct pcnet32_private *lp = dev->priv;
 	lp->msg_enable = value;
 }
 
 static int pcnet32_nway_reset(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
 	int r = -EOPNOTSUPP;
 
 	if (lp->mii) {
 		spin_lock_irqsave(&lp->lock, flags);
 		r = mii_nway_restart(&lp->mii_if);
 		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 	return r;
 }
 
-static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void pcnet32_get_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *ering)
 {
 	struct pcnet32_private *lp = dev->priv;
 
 	ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
 	ering->tx_pending = lp->tx_ring_size - 1;
 	ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
 	ering->rx_pending = lp->rx_ring_size - 1;
 }
 
-static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int pcnet32_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *ering)
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
 	int i;
 
 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
 		return -EINVAL;
 
 	if (netif_running(dev))
 		pcnet32_close(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
 	pcnet32_free_ring(dev);
-	lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
-	lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
+	lp->tx_ring_size =
+	    min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+	lp->rx_ring_size =
+	    min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
 
 	/* set the minimum ring size to 4, to allow the loopback test to work
 	 * unchanged.
 	 */
 	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
 		if (lp->tx_ring_size <= (1 << i))
 			break;
 	}
 	lp->tx_ring_size = (1 << i);
 	lp->tx_mod_mask = lp->tx_ring_size - 1;
 	lp->tx_len_bits = (i << 12);
 
 	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
 		if (lp->rx_ring_size <= (1 << i))
 			break;
 	}
 	lp->rx_ring_size = (1 << i);
 	lp->rx_mod_mask = lp->rx_ring_size - 1;
 	lp->rx_len_bits = (i << 4);
 
 	if (pcnet32_alloc_ring(dev, dev->name)) {
 		pcnet32_free_ring(dev);
 		spin_unlock_irqrestore(&lp->lock, flags);
 		return -ENOMEM;
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	if (pcnet32_debug & NETIF_MSG_DRV)
-		printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
-			dev->name, lp->rx_ring_size, lp->tx_ring_size);
+		printk(KERN_INFO PFX
+		       "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
+		       lp->rx_ring_size, lp->tx_ring_size);
 
 	if (netif_running(dev))
 		pcnet32_open(dev);
 
 	return 0;
 }
 
-static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+				u8 * data)
 {
 	memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
 }
 
 static int pcnet32_self_test_count(struct net_device *dev)
 {
 	return PCNET32_TEST_LEN;
 }
 
 static void pcnet32_ethtool_test(struct net_device *dev,
-				 struct ethtool_test *test, u64 *data)
+				 struct ethtool_test *test, u64 * data)
 {
 	struct pcnet32_private *lp = dev->priv;
 	int rc;
 
 	if (test->flags == ETH_TEST_FL_OFFLINE) {
 		rc = pcnet32_loopback_test(dev, data);
 		if (rc) {
 			if (netif_msg_hw(lp))
-				printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name);
-			test->flags |= ETH_TEST_FL_FAILED;
+				printk(KERN_DEBUG "%s: Loopback test failed.\n",
+				       dev->name);
+			test->flags |= ETH_TEST_FL_FAILED;
+		} else if (netif_msg_hw(lp))
+			printk(KERN_DEBUG "%s: Loopback test passed.\n",
+			       dev->name);
 	} else if (netif_msg_hw(lp))
-		printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name);
-	} else if (netif_msg_hw(lp))
-		printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name);
+		printk(KERN_DEBUG
+		       "%s: No tests to run (specify 'Offline' on ethtool).",
+		       dev->name);
 }				/* end pcnet32_ethtool_test */
 
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 {
 	struct pcnet32_private *lp = dev->priv;
 	struct pcnet32_access *a = &lp->a;	/* access to registers */
 	ulong ioaddr = dev->base_addr;	/* card base I/O address */
 	struct sk_buff *skb;	/* sk buff */
 	int x, i;		/* counters */
 	int numbuffs = 4;	/* number of TX/RX buffers and descs */
 	u16 status = 0x8300;	/* TX ring status */
 	u16 teststatus;		/* test of ring status */
 	int rc;			/* return code */
 	int size;		/* size of packets */
 	unsigned char *packet;	/* source packet data */
 	static const int data_len = 60;	/* length of source packets */
 	unsigned long flags;
 	unsigned long ticks;
 
 	*data1 = 1;		/* status of test, default to fail */
 	rc = 1;			/* default to fail */
 
 	if (netif_running(dev))
 		pcnet32_close(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
 
 	/* Reset the PCNET32 */
-	lp->a.reset (ioaddr);
+	lp->a.reset(ioaddr);
 
 	/* switch pcnet32 to 32bit mode */
-	lp->a.write_bcr (ioaddr, 20, 2);
+	lp->a.write_bcr(ioaddr, 20, 2);
 
-	lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+	lp->init_block.mode =
+	    le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
 	lp->init_block.filter[0] = 0;
 	lp->init_block.filter[1] = 0;
 
 	/* purge & init rings but don't actually restart */
 	pcnet32_restart(dev, 0x0000);
 
 	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Set STOP bit */
 
 	/* Initialize Transmit buffers. */
 	size = data_len + 15;
-	for (x=0; x<numbuffs; x++) {
+	for (x = 0; x < numbuffs; x++) {
 		if (!(skb = dev_alloc_skb(size))) {
 			if (netif_msg_hw(lp))
-				printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n",
-					dev->name, __LINE__);
+				printk(KERN_DEBUG
+				       "%s: Cannot allocate skb at line: %d!\n",
+				       dev->name, __LINE__);
 			goto clean_up;
 		} else {
 			packet = skb->data;
 			skb_put(skb, size);	/* create space for data */
 			lp->tx_skbuff[x] = skb;
 			lp->tx_ring[x].length = le16_to_cpu(-skb->len);
 			lp->tx_ring[x].misc = 0;
 
 			/* put DA and SA into the skb */
-			for (i=0; i<6; i++)
+			for (i = 0; i < 6; i++)
 				*packet++ = dev->dev_addr[i];
-			for (i=0; i<6; i++)
+			for (i = 0; i < 6; i++)
 				*packet++ = dev->dev_addr[i];
 			/* type */
 			*packet++ = 0x08;
 			*packet++ = 0x06;
 			/* packet number */
 			*packet++ = x;
 			/* fill packet with data */
-			for (i=0; i<data_len; i++)
+			for (i = 0; i < data_len; i++)
 				*packet++ = i;
 
-			lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data,
-					skb->len, PCI_DMA_TODEVICE);
-			lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]);
+			lp->tx_dma_addr[x] =
+			    pci_map_single(lp->pci_dev, skb->data, skb->len,
+					   PCI_DMA_TODEVICE);
+			lp->tx_ring[x].base =
+			    (u32) le32_to_cpu(lp->tx_dma_addr[x]);
 			wmb();	/* Make sure owner changes after all others are visible */
 			lp->tx_ring[x].status = le16_to_cpu(status);
 		}
 	}
 
 	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BSR32 */
 	x = x | 0x0002;
 	a->write_bcr(ioaddr, 32, x);
 
-	lp->a.write_csr (ioaddr, 15, 0x0044);	/* set int loopback in CSR15 */
+	lp->a.write_csr(ioaddr, 15, 0x0044);	/* set int loopback in CSR15 */
 
 	teststatus = le16_to_cpu(0x8000);
 	lp->a.write_csr(ioaddr, 0, 0x0002);	/* Set STRT bit */
 
 	/* Check status of descriptors */
-	for (x=0; x<numbuffs; x++) {
+	for (x = 0; x < numbuffs; x++) {
 		ticks = 0;
 		rmb();
 		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
 			spin_unlock_irqrestore(&lp->lock, flags);
 			mdelay(1);
 			spin_lock_irqsave(&lp->lock, flags);
 			rmb();
 			ticks++;
 		}
 		if (ticks == 200) {
 			if (netif_msg_hw(lp))
-				printk("%s: Desc %d failed to reset!\n",dev->name,x);
+				printk("%s: Desc %d failed to reset!\n",
+				       dev->name, x);
 			break;
 		}
 	}
 
 	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Set STOP bit */
 	wmb();
 	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
 		printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
 
-		for (x=0; x<numbuffs; x++) {
+		for (x = 0; x < numbuffs; x++) {
 			printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
 			skb = lp->rx_skbuff[x];
-			for (i=0; i<size; i++) {
-				printk("%02x ", *(skb->data+i));
+			for (i = 0; i < size; i++) {
+				printk("%02x ", *(skb->data + i));
 			}
 			printk("\n");
 		}
 	}
 
 	x = 0;
 	rc = 0;
-	while (x<numbuffs && !rc) {
+	while (x < numbuffs && !rc) {
 		skb = lp->rx_skbuff[x];
 		packet = lp->tx_skbuff[x]->data;
-		for (i=0; i<size; i++) {
-			if (*(skb->data+i) != packet[i]) {
+		for (i = 0; i < size; i++) {
+			if (*(skb->data + i) != packet[i]) {
 				if (netif_msg_hw(lp))
-					printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n",
-						dev->name, i, *(skb->data+i), packet[i]);
+					printk(KERN_DEBUG
+					       "%s: Error in compare! %2x - %02x %02x\n",
+					       dev->name, i, *(skb->data + i),
+					       packet[i]);
 				rc = 1;
 				break;
 			}
 		}
 		x++;
 	}
 	if (!rc) {
 		*data1 = 0;
 	}
 
-clean_up:
+      clean_up:
 	pcnet32_purge_tx_ring(dev);
 	x = a->read_csr(ioaddr, 15) & 0xFFFF;
 	a->write_csr(ioaddr, 15, (x & ~0x0044));	/* reset bits 6 and 2 */
 
 	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
 	x = x & ~0x0002;
 	a->write_bcr(ioaddr, 32, x);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	if (netif_running(dev)) {
 		pcnet32_open(dev);
 	} else {
-		lp->a.write_bcr (ioaddr, 20, 4);	/* return to 16bit mode */
+		lp->a.write_bcr(ioaddr, 20, 4);	/* return to 16bit mode */
 	}
 
-	return(rc);
+	return (rc);
 }				/* end pcnet32_loopback_test */
 
 static void pcnet32_led_blink_callback(struct net_device *dev)
 {
 	struct pcnet32_private *lp = dev->priv;
 	struct pcnet32_access *a = &lp->a;
 	ulong ioaddr = dev->base_addr;
 	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&lp->lock, flags);
-	for (i=4; i<8; i++) {
+	for (i = 4; i < 8; i++) {
 		a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
 	}
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
 }
 
 static int pcnet32_phys_id(struct net_device *dev, u32 data)
 {
 	struct pcnet32_private *lp = dev->priv;
 	struct pcnet32_access *a = &lp->a;
 	ulong ioaddr = dev->base_addr;
 	unsigned long flags;
 	int i, regs[4];
 
 	if (!lp->blink_timer.function) {
 		init_timer(&lp->blink_timer);
-		lp->blink_timer.function = (void *) pcnet32_led_blink_callback;
-		lp->blink_timer.data = (unsigned long) dev;
+		lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
+		lp->blink_timer.data = (unsigned long)dev;
 	}
 
 	/* Save the current value of the bcrs */
 	spin_lock_irqsave(&lp->lock, flags);
-	for (i=4; i<8; i++) {
-		regs[i-4] = a->read_bcr(ioaddr, i);
+	for (i = 4; i < 8; i++) {
+		regs[i - 4] = a->read_bcr(ioaddr, i);
 	}
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	mod_timer(&lp->blink_timer, jiffies);
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
-		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+	if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
+		data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
 
 	msleep_interruptible(data * 1000);
 	del_timer_sync(&lp->blink_timer);
 
 	/* Restore the original value of the bcrs */
 	spin_lock_irqsave(&lp->lock, flags);
-	for (i=4; i<8; i++) {
-		a->write_bcr(ioaddr, i, regs[i-4]);
+	for (i = 4; i < 8; i++) {
+		a->write_bcr(ioaddr, i, regs[i - 4]);
 	}
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	return 0;
 }
 
848#define PCNET32_REGS_PER_PHY 32
849#define PCNET32_MAX_PHYS 32
939static int pcnet32_get_regs_len(struct net_device *dev) 850static int pcnet32_get_regs_len(struct net_device *dev)
940{ 851{
941 return(PCNET32_NUM_REGS * sizeof(u16)); 852 struct pcnet32_private *lp = dev->priv;
853 int j = lp->phycount * PCNET32_REGS_PER_PHY;
854
855 return ((PCNET32_NUM_REGS + j) * sizeof(u16));
942} 856}
943 857
944static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 858static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
945 void *ptr) 859 void *ptr)
946{ 860{
947 int i, csr0; 861 int i, csr0;
948 u16 *buff = ptr; 862 u16 *buff = ptr;
949 struct pcnet32_private *lp = dev->priv; 863 struct pcnet32_private *lp = dev->priv;
950 struct pcnet32_access *a = &lp->a; 864 struct pcnet32_access *a = &lp->a;
951 ulong ioaddr = dev->base_addr; 865 ulong ioaddr = dev->base_addr;
952 int ticks; 866 int ticks;
953 unsigned long flags; 867 unsigned long flags;
954
955 spin_lock_irqsave(&lp->lock, flags);
956
957 csr0 = a->read_csr(ioaddr, 0);
958 if (!(csr0 & 0x0004)) { /* If not stopped */
959 /* set SUSPEND (SPND) - CSR5 bit 0 */
960 a->write_csr(ioaddr, 5, 0x0001);
961
962 /* poll waiting for bit to be set */
963 ticks = 0;
964 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
965 spin_unlock_irqrestore(&lp->lock, flags);
966 mdelay(1);
967 spin_lock_irqsave(&lp->lock, flags);
968 ticks++;
969 if (ticks > 200) {
970 if (netif_msg_hw(lp))
971 printk(KERN_DEBUG "%s: Error getting into suspend!\n",
972 dev->name);
973 break;
974 }
975 }
976 }
977 868
978 /* read address PROM */ 869 spin_lock_irqsave(&lp->lock, flags);
979 for (i=0; i<16; i += 2)
980 *buff++ = inw(ioaddr + i);
981 870
982 /* read control and status registers */ 871 csr0 = a->read_csr(ioaddr, 0);
983 for (i=0; i<90; i++) { 872 if (!(csr0 & 0x0004)) { /* If not stopped */
984 *buff++ = a->read_csr(ioaddr, i); 873 /* set SUSPEND (SPND) - CSR5 bit 0 */
985 } 874 a->write_csr(ioaddr, 5, 0x0001);
875
876 /* poll waiting for bit to be set */
877 ticks = 0;
878 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
879 spin_unlock_irqrestore(&lp->lock, flags);
880 mdelay(1);
881 spin_lock_irqsave(&lp->lock, flags);
882 ticks++;
883 if (ticks > 200) {
884 if (netif_msg_hw(lp))
885 printk(KERN_DEBUG
886 "%s: Error getting into suspend!\n",
887 dev->name);
888 break;
889 }
890 }
891 }
986 892
987 *buff++ = a->read_csr(ioaddr, 112); 893 /* read address PROM */
988 *buff++ = a->read_csr(ioaddr, 114); 894 for (i = 0; i < 16; i += 2)
895 *buff++ = inw(ioaddr + i);
989 896
990 /* read bus configuration registers */ 897 /* read control and status registers */
991 for (i=0; i<30; i++) { 898 for (i = 0; i < 90; i++) {
992 *buff++ = a->read_bcr(ioaddr, i); 899 *buff++ = a->read_csr(ioaddr, i);
993 } 900 }
994 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ 901
995 for (i=31; i<36; i++) { 902 *buff++ = a->read_csr(ioaddr, 112);
996 *buff++ = a->read_bcr(ioaddr, i); 903 *buff++ = a->read_csr(ioaddr, 114);
997 }
998 904
999 /* read mii phy registers */ 905 /* read bus configuration registers */
1000 if (lp->mii) { 906 for (i = 0; i < 30; i++) {
1001 for (i=0; i<32; i++) { 907 *buff++ = a->read_bcr(ioaddr, i);
1002 lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i); 908 }
1003 *buff++ = lp->a.read_bcr(ioaddr, 34); 909 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
910 for (i = 31; i < 36; i++) {
911 *buff++ = a->read_bcr(ioaddr, i);
1004 } 912 }
1005 }
1006 913
1007 if (!(csr0 & 0x0004)) { /* If not stopped */ 914 /* read mii phy registers */
1008 /* clear SUSPEND (SPND) - CSR5 bit 0 */ 915 if (lp->mii) {
1009 a->write_csr(ioaddr, 5, 0x0000); 916 int j;
1010 } 917 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
918 if (lp->phymask & (1 << j)) {
919 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
920 lp->a.write_bcr(ioaddr, 33,
921 (j << 5) | i);
922 *buff++ = lp->a.read_bcr(ioaddr, 34);
923 }
924 }
925 }
926 }
1011 927
1012 i = buff - (u16 *)ptr; 928 if (!(csr0 & 0x0004)) { /* If not stopped */
1013 for (; i < PCNET32_NUM_REGS; i++) 929 /* clear SUSPEND (SPND) - CSR5 bit 0 */
1014 *buff++ = 0; 930 a->write_csr(ioaddr, 5, 0x0000);
931 }
1015 932
1016 spin_unlock_irqrestore(&lp->lock, flags); 933 spin_unlock_irqrestore(&lp->lock, flags);
1017} 934}
1018 935
1019static struct ethtool_ops pcnet32_ethtool_ops = { 936static struct ethtool_ops pcnet32_ethtool_ops = {
1020 .get_settings = pcnet32_get_settings, 937 .get_settings = pcnet32_get_settings,
1021 .set_settings = pcnet32_set_settings, 938 .set_settings = pcnet32_set_settings,
1022 .get_drvinfo = pcnet32_get_drvinfo, 939 .get_drvinfo = pcnet32_get_drvinfo,
1023 .get_msglevel = pcnet32_get_msglevel, 940 .get_msglevel = pcnet32_get_msglevel,
1024 .set_msglevel = pcnet32_set_msglevel, 941 .set_msglevel = pcnet32_set_msglevel,
1025 .nway_reset = pcnet32_nway_reset, 942 .nway_reset = pcnet32_nway_reset,
1026 .get_link = pcnet32_get_link, 943 .get_link = pcnet32_get_link,
1027 .get_ringparam = pcnet32_get_ringparam, 944 .get_ringparam = pcnet32_get_ringparam,
1028 .set_ringparam = pcnet32_set_ringparam, 945 .set_ringparam = pcnet32_set_ringparam,
1029 .get_tx_csum = ethtool_op_get_tx_csum, 946 .get_tx_csum = ethtool_op_get_tx_csum,
1030 .get_sg = ethtool_op_get_sg, 947 .get_sg = ethtool_op_get_sg,
1031 .get_tso = ethtool_op_get_tso, 948 .get_tso = ethtool_op_get_tso,
1032 .get_strings = pcnet32_get_strings, 949 .get_strings = pcnet32_get_strings,
1033 .self_test_count = pcnet32_self_test_count, 950 .self_test_count = pcnet32_self_test_count,
1034 .self_test = pcnet32_ethtool_test, 951 .self_test = pcnet32_ethtool_test,
1035 .phys_id = pcnet32_phys_id, 952 .phys_id = pcnet32_phys_id,
1036 .get_regs_len = pcnet32_get_regs_len, 953 .get_regs_len = pcnet32_get_regs_len,
1037 .get_regs = pcnet32_get_regs, 954 .get_regs = pcnet32_get_regs,
1038 .get_perm_addr = ethtool_op_get_perm_addr, 955 .get_perm_addr = ethtool_op_get_perm_addr,
1039}; 956};
1040 957
1041/* only probes for non-PCI devices, the rest are handled by 958/* only probes for non-PCI devices, the rest are handled by
1042 * pci_register_driver via pcnet32_probe_pci */ 959 * pci_register_driver via pcnet32_probe_pci */
1043 960
1044static void __devinit 961static void __devinit pcnet32_probe_vlbus(void)
1045pcnet32_probe_vlbus(void)
1046{ 962{
1047 unsigned int *port, ioaddr; 963 unsigned int *port, ioaddr;
1048 964
1049 /* search for PCnet32 VLB cards at known addresses */ 965 /* search for PCnet32 VLB cards at known addresses */
1050 for (port = pcnet32_portlist; (ioaddr = *port); port++) { 966 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1051 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { 967 if (request_region
1052 /* check if there is really a pcnet chip on that ioaddr */ 968 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1053 if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) { 969 /* check if there is really a pcnet chip on that ioaddr */
1054 pcnet32_probe1(ioaddr, 0, NULL); 970 if ((inb(ioaddr + 14) == 0x57)
1055 } else { 971 && (inb(ioaddr + 15) == 0x57)) {
1056 release_region(ioaddr, PCNET32_TOTAL_SIZE); 972 pcnet32_probe1(ioaddr, 0, NULL);
1057 } 973 } else {
1058 } 974 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1059 } 975 }
976 }
977 }
1060} 978}
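
The VLB probe claims each candidate I/O range before touching it, keeps it only when the two signature bytes at offsets 14 and 15 both read 0x57 (ASCII 'W'), and releases it otherwise. A sketch of that claim/check/release flow; fake_inb(), claim() and release() are stubs, not the real port accessors:

#include <stdint.h>
#include <stdio.h>

static unsigned int portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 };

/* Stubs: only the device at 0x340 carries the 0x57/0x57 signature. */
static uint8_t fake_inb(unsigned int addr)
{
        return (addr & ~0xfU) == 0x340 ? 0x57 : 0xff;
}
static int  claim(unsigned int addr)   { (void)addr; return 1; }
static void release(unsigned int addr) { (void)addr; }

int main(void)
{
        unsigned int *port, ioaddr;

        for (port = portlist; (ioaddr = *port); port++) {
                if (!claim(ioaddr))                /* request_region() */
                        continue;
                if (fake_inb(ioaddr + 14) == 0x57 &&
                    fake_inb(ioaddr + 15) == 0x57)
                        printf("PCnet signature at %#x\n", ioaddr);
                else
                        release(ioaddr);           /* not ours, give it back */
        }
        return 0;
}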
1061 979
1062
1063static int __devinit 980static int __devinit
1064pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) 981pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1065{ 982{
1066 unsigned long ioaddr; 983 unsigned long ioaddr;
1067 int err; 984 int err;
1068 985
1069 err = pci_enable_device(pdev); 986 err = pci_enable_device(pdev);
1070 if (err < 0) { 987 if (err < 0) {
1071 if (pcnet32_debug & NETIF_MSG_PROBE) 988 if (pcnet32_debug & NETIF_MSG_PROBE)
1072 printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); 989 printk(KERN_ERR PFX
1073 return err; 990 "failed to enable device -- err=%d\n", err);
1074 } 991 return err;
1075 pci_set_master(pdev); 992 }
993 pci_set_master(pdev);
994
995 ioaddr = pci_resource_start(pdev, 0);
996 if (!ioaddr) {
997 if (pcnet32_debug & NETIF_MSG_PROBE)
998 printk(KERN_ERR PFX
999 "card has no PCI IO resources, aborting\n");
1000 return -ENODEV;
1001 }
1076 1002
1077 ioaddr = pci_resource_start (pdev, 0); 1003 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1078 if (!ioaddr) { 1004 if (pcnet32_debug & NETIF_MSG_PROBE)
1079 if (pcnet32_debug & NETIF_MSG_PROBE) 1005 printk(KERN_ERR PFX
1080 printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n"); 1006 "architecture does not support 32bit PCI busmaster DMA\n");
1081 return -ENODEV; 1007 return -ENODEV;
1082 } 1008 }
1009 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
1010 NULL) {
1011 if (pcnet32_debug & NETIF_MSG_PROBE)
1012 printk(KERN_ERR PFX
1013 "io address range already allocated\n");
1014 return -EBUSY;
1015 }
1083 1016
1084 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { 1017 err = pcnet32_probe1(ioaddr, 1, pdev);
1085 if (pcnet32_debug & NETIF_MSG_PROBE) 1018 if (err < 0) {
1086 printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n"); 1019 pci_disable_device(pdev);
1087 return -ENODEV; 1020 }
1088 } 1021 return err;
1089 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) {
1090 if (pcnet32_debug & NETIF_MSG_PROBE)
1091 printk(KERN_ERR PFX "io address range already allocated\n");
1092 return -EBUSY;
1093 }
1094
1095 err = pcnet32_probe1(ioaddr, 1, pdev);
1096 if (err < 0) {
1097 pci_disable_device(pdev);
1098 }
1099 return err;
1100} 1022}
1101 1023
1102
1103/* pcnet32_probe1 1024/* pcnet32_probe1
1104 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci. 1025 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
1105 * pdev will be NULL when called from pcnet32_probe_vlbus. 1026 * pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1107static int __devinit 1028static int __devinit
1108pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) 1029pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1109{ 1030{
1110 struct pcnet32_private *lp; 1031 struct pcnet32_private *lp;
1111 dma_addr_t lp_dma_addr; 1032 dma_addr_t lp_dma_addr;
1112 int i, media; 1033 int i, media;
1113 int fdx, mii, fset, dxsuflo; 1034 int fdx, mii, fset, dxsuflo;
1114 int chip_version; 1035 int chip_version;
1115 char *chipname; 1036 char *chipname;
1116 struct net_device *dev; 1037 struct net_device *dev;
1117 struct pcnet32_access *a = NULL; 1038 struct pcnet32_access *a = NULL;
1118 u8 promaddr[6]; 1039 u8 promaddr[6];
1119 int ret = -ENODEV; 1040 int ret = -ENODEV;
1120 1041
1121 /* reset the chip */ 1042 /* reset the chip */
1122 pcnet32_wio_reset(ioaddr); 1043 pcnet32_wio_reset(ioaddr);
1123 1044
1124 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ 1045 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1125 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { 1046 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1126 a = &pcnet32_wio; 1047 a = &pcnet32_wio;
1127 } else { 1048 } else {
1128 pcnet32_dwio_reset(ioaddr); 1049 pcnet32_dwio_reset(ioaddr);
1129 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { 1050 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1130 a = &pcnet32_dwio; 1051 && pcnet32_dwio_check(ioaddr)) {
1131 } else 1052 a = &pcnet32_dwio;
1132 goto err_release_region; 1053 } else
1133 } 1054 goto err_release_region;
1134 1055 }
1135 chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16); 1056
1136 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) 1057 chip_version =
1137 printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version); 1058 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1138 if ((chip_version & 0xfff) != 0x003) { 1059 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1139 if (pcnet32_debug & NETIF_MSG_PROBE) 1060 printk(KERN_INFO " PCnet chip version is %#x.\n",
1140 printk(KERN_INFO PFX "Unsupported chip version.\n"); 1061 chip_version);
1141 goto err_release_region; 1062 if ((chip_version & 0xfff) != 0x003) {
1142 } 1063 if (pcnet32_debug & NETIF_MSG_PROBE)
1143 1064 printk(KERN_INFO PFX "Unsupported chip version.\n");
1144 /* initialize variables */ 1065 goto err_release_region;
1145 fdx = mii = fset = dxsuflo = 0; 1066 }
1146 chip_version = (chip_version >> 12) & 0xffff; 1067
1147 1068 /* initialize variables */
1148 switch (chip_version) { 1069 fdx = mii = fset = dxsuflo = 0;
1149 case 0x2420: 1070 chip_version = (chip_version >> 12) & 0xffff;
1150 chipname = "PCnet/PCI 79C970"; /* PCI */ 1071
1151 break; 1072 switch (chip_version) {
1152 case 0x2430: 1073 case 0x2420:
1153 if (shared) 1074 chipname = "PCnet/PCI 79C970"; /* PCI */
1154 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ 1075 break;
1155 else 1076 case 0x2430:
1156 chipname = "PCnet/32 79C965"; /* 486/VL bus */ 1077 if (shared)
1157 break; 1078 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1158 case 0x2621: 1079 else
1159 chipname = "PCnet/PCI II 79C970A"; /* PCI */ 1080 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1160 fdx = 1; 1081 break;
1161 break; 1082 case 0x2621:
1162 case 0x2623: 1083 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1163 chipname = "PCnet/FAST 79C971"; /* PCI */ 1084 fdx = 1;
1164 fdx = 1; mii = 1; fset = 1; 1085 break;
1165 break; 1086 case 0x2623:
1166 case 0x2624: 1087 chipname = "PCnet/FAST 79C971"; /* PCI */
1167 chipname = "PCnet/FAST+ 79C972"; /* PCI */ 1088 fdx = 1;
1168 fdx = 1; mii = 1; fset = 1; 1089 mii = 1;
1169 break; 1090 fset = 1;
1170 case 0x2625: 1091 break;
1171 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1092 case 0x2624:
1172 fdx = 1; mii = 1; 1093 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1173 break; 1094 fdx = 1;
1174 case 0x2626: 1095 mii = 1;
1175 chipname = "PCnet/Home 79C978"; /* PCI */ 1096 fset = 1;
1176 fdx = 1; 1097 break;
1098 case 0x2625:
1099 chipname = "PCnet/FAST III 79C973"; /* PCI */
1100 fdx = 1;
1101 mii = 1;
1102 break;
1103 case 0x2626:
1104 chipname = "PCnet/Home 79C978"; /* PCI */
1105 fdx = 1;
1106 /*
1107 * This is based on specs published at www.amd.com. This section
1108 * assumes that a card with a 79C978 wants to go into standard
1109 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1110 * and the module option homepna=1 can select this instead.
1111 */
1112 media = a->read_bcr(ioaddr, 49);
1113 media &= ~3; /* default to 10Mb ethernet */
1114 if (cards_found < MAX_UNITS && homepna[cards_found])
1115 media |= 1; /* switch to home wiring mode */
1116 if (pcnet32_debug & NETIF_MSG_PROBE)
1117 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
1118 (media & 1) ? "1" : "10");
1119 a->write_bcr(ioaddr, 49, media);
1120 break;
1121 case 0x2627:
1122 chipname = "PCnet/FAST III 79C975"; /* PCI */
1123 fdx = 1;
1124 mii = 1;
1125 break;
1126 case 0x2628:
1127 chipname = "PCnet/PRO 79C976";
1128 fdx = 1;
1129 mii = 1;
1130 break;
1131 default:
1132 if (pcnet32_debug & NETIF_MSG_PROBE)
1133 printk(KERN_INFO PFX
1134 "PCnet version %#x, no PCnet32 chip.\n",
1135 chip_version);
1136 goto err_release_region;
1137 }
1138
1177 /* 1139 /*
1178 * This is based on specs published at www.amd.com. This section 1140 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1179 * assumes that a card with a 79C978 wants to go into standard 1141 * starting until the packet is loaded. Strike one for reliability, lose
1180 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, 1142 * one for latency - although on PCI this isn't a big loss. Older chips
1181 * and the module option homepna=1 can select this instead. 1143 * have FIFOs smaller than a packet, so you can't do this.
1144 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1182 */ 1145 */
1183 media = a->read_bcr(ioaddr, 49); 1146
1184 media &= ~3; /* default to 10Mb ethernet */ 1147 if (fset) {
1185 if (cards_found < MAX_UNITS && homepna[cards_found]) 1148 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1186 media |= 1; /* switch to home wiring mode */ 1149 a->write_csr(ioaddr, 80,
1187 if (pcnet32_debug & NETIF_MSG_PROBE) 1150 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1188 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", 1151 dxsuflo = 1;
1189 (media & 1) ? "1" : "10"); 1152 }
1190 a->write_bcr(ioaddr, 49, media); 1153
1191 break; 1154 dev = alloc_etherdev(0);
1192 case 0x2627: 1155 if (!dev) {
1193 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1156 if (pcnet32_debug & NETIF_MSG_PROBE)
1194 fdx = 1; mii = 1; 1157 printk(KERN_ERR PFX "Memory allocation failed.\n");
1195 break; 1158 ret = -ENOMEM;
1196 case 0x2628: 1159 goto err_release_region;
1197 chipname = "PCnet/PRO 79C976"; 1160 }
1198 fdx = 1; mii = 1; 1161 SET_NETDEV_DEV(dev, &pdev->dev);
1199 break; 1162
1200 default:
1201 if (pcnet32_debug & NETIF_MSG_PROBE)
1202 printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
1203 chip_version);
1204 goto err_release_region;
1205 }
1206
1207 /*
1208 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1209 * starting until the packet is loaded. Strike one for reliability, lose
1210 * one for latency - although on PCI this isn't a big loss. Older chips
1211 * have FIFOs smaller than a packet, so you can't do this.
1212 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1213 */
1214
1215 if (fset) {
1216 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1217 a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1218 dxsuflo = 1;
1219 }
1220
1221 dev = alloc_etherdev(0);
1222 if (!dev) {
1223 if (pcnet32_debug & NETIF_MSG_PROBE) 1163 if (pcnet32_debug & NETIF_MSG_PROBE)
1224 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1164 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
1225 ret = -ENOMEM; 1165
1226 goto err_release_region; 1166 /* In most chips, after a chip reset, the ethernet address is read from the
1227 } 1167 * station address PROM at the base address and programmed into the
1228 SET_NETDEV_DEV(dev, &pdev->dev); 1168 * "Physical Address Registers" CSR12-14.
1229 1169 * As a precautionary measure, we read the PROM values and complain if
1230 if (pcnet32_debug & NETIF_MSG_PROBE) 1170 * they disagree with the CSRs. Either way, we use the CSR values, and
1231 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); 1171 * double check that they are valid.
1232 1172 */
1233 /* In most chips, after a chip reset, the ethernet address is read from the 1173 for (i = 0; i < 3; i++) {
1234 * station address PROM at the base address and programmed into the 1174 unsigned int val;
1235 * "Physical Address Registers" CSR12-14. 1175 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1236 * As a precautionary measure, we read the PROM values and complain if 1176 /* There may be endianness issues here. */
1237 * they disagree with the CSRs. Either way, we use the CSR values, and 1177 dev->dev_addr[2 * i] = val & 0x0ff;
1238 * double check that they are valid. 1178 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1239 */ 1179 }
1240 for (i = 0; i < 3; i++) { 1180
1241 unsigned int val; 1181 /* read PROM address and compare with CSR address */
1242 val = a->read_csr(ioaddr, i+12) & 0x0ffff;
1243 /* There may be endianness issues here. */
1244 dev->dev_addr[2*i] = val & 0x0ff;
1245 dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
1246 }
1247
1248 /* read PROM address and compare with CSR address */
1249 for (i = 0; i < 6; i++)
1250 promaddr[i] = inb(ioaddr + i);
1251
1252 if (memcmp(promaddr, dev->dev_addr, 6)
1253 || !is_valid_ether_addr(dev->dev_addr)) {
1254 if (is_valid_ether_addr(promaddr)) {
1255 if (pcnet32_debug & NETIF_MSG_PROBE) {
1256 printk(" warning: CSR address invalid,\n");
1257 printk(KERN_INFO " using instead PROM address of");
1258 }
1259 memcpy(dev->dev_addr, promaddr, 6);
1260 }
1261 }
1262 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1263
1264 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1265 if (!is_valid_ether_addr(dev->perm_addr))
1266 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1267
1268 if (pcnet32_debug & NETIF_MSG_PROBE) {
1269 for (i = 0; i < 6; i++) 1182 for (i = 0; i < 6; i++)
1270 printk(" %2.2x", dev->dev_addr[i]); 1183 promaddr[i] = inb(ioaddr + i);
1271 1184
1272 /* Version 0x2623 and 0x2624 */ 1185 if (memcmp(promaddr, dev->dev_addr, 6)
1273 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1186 || !is_valid_ether_addr(dev->dev_addr)) {
1274 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ 1187 if (is_valid_ether_addr(promaddr)) {
1275 printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i); 1188 if (pcnet32_debug & NETIF_MSG_PROBE) {
1276 switch(i>>10) { 1189 printk(" warning: CSR address invalid,\n");
1277 case 0: printk(" 20 bytes,"); break; 1190 printk(KERN_INFO
1278 case 1: printk(" 64 bytes,"); break; 1191 " using instead PROM address of");
1279 case 2: printk(" 128 bytes,"); break; 1192 }
1280 case 3: printk("~220 bytes,"); break; 1193 memcpy(dev->dev_addr, promaddr, 6);
1281 } 1194 }
1282 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ 1195 }
1283 printk(" BCR18(%x):",i&0xffff); 1196 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1284 if (i & (1<<5)) printk("BurstWrEn "); 1197
1285 if (i & (1<<6)) printk("BurstRdEn "); 1198 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1286 if (i & (1<<7)) printk("DWordIO "); 1199 if (!is_valid_ether_addr(dev->perm_addr))
1287 if (i & (1<<11)) printk("NoUFlow "); 1200 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1288 i = a->read_bcr(ioaddr, 25); 1201
1289 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8); 1202 if (pcnet32_debug & NETIF_MSG_PROBE) {
1290 i = a->read_bcr(ioaddr, 26); 1203 for (i = 0; i < 6; i++)
1291 printk(" SRAM_BND=0x%04x,",i<<8); 1204 printk(" %2.2x", dev->dev_addr[i]);
1292 i = a->read_bcr(ioaddr, 27); 1205
1293 if (i & (1<<14)) printk("LowLatRx"); 1206 /* Version 0x2623 and 0x2624 */
1294 } 1207 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1295 } 1208 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1296 1209 printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
1297 dev->base_addr = ioaddr; 1210 switch (i >> 10) {
1298 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ 1211 case 0:
1299 if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { 1212 printk(" 20 bytes,");
1300 if (pcnet32_debug & NETIF_MSG_PROBE) 1213 break;
1301 printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); 1214 case 1:
1302 ret = -ENOMEM; 1215 printk(" 64 bytes,");
1303 goto err_free_netdev; 1216 break;
1304 } 1217 case 2:
1305 1218 printk(" 128 bytes,");
1306 memset(lp, 0, sizeof(*lp)); 1219 break;
1307 lp->dma_addr = lp_dma_addr; 1220 case 3:
1308 lp->pci_dev = pdev; 1221 printk("~220 bytes,");
1309 1222 break;
1310 spin_lock_init(&lp->lock); 1223 }
1311 1224 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1312 SET_MODULE_OWNER(dev); 1225 printk(" BCR18(%x):", i & 0xffff);
1313 SET_NETDEV_DEV(dev, &pdev->dev); 1226 if (i & (1 << 5))
1314 dev->priv = lp; 1227 printk("BurstWrEn ");
1315 lp->name = chipname; 1228 if (i & (1 << 6))
1316 lp->shared_irq = shared; 1229 printk("BurstRdEn ");
1317 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ 1230 if (i & (1 << 7))
1318 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ 1231 printk("DWordIO ");
1319 lp->tx_mod_mask = lp->tx_ring_size - 1; 1232 if (i & (1 << 11))
1320 lp->rx_mod_mask = lp->rx_ring_size - 1; 1233 printk("NoUFlow ");
1321 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); 1234 i = a->read_bcr(ioaddr, 25);
1322 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); 1235 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
1323 lp->mii_if.full_duplex = fdx; 1236 i = a->read_bcr(ioaddr, 26);
1324 lp->mii_if.phy_id_mask = 0x1f; 1237 printk(" SRAM_BND=0x%04x,", i << 8);
1325 lp->mii_if.reg_num_mask = 0x1f; 1238 i = a->read_bcr(ioaddr, 27);
1326 lp->dxsuflo = dxsuflo; 1239 if (i & (1 << 14))
1327 lp->mii = mii; 1240 printk("LowLatRx");
1328 lp->msg_enable = pcnet32_debug; 1241 }
1329 if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) 1242 }
1330 lp->options = PCNET32_PORT_ASEL; 1243
1331 else 1244 dev->base_addr = ioaddr;
1332 lp->options = options_mapping[options[cards_found]]; 1245 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1333 lp->mii_if.dev = dev; 1246 if ((lp =
1334 lp->mii_if.mdio_read = mdio_read; 1247 pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
1335 lp->mii_if.mdio_write = mdio_write; 1248 if (pcnet32_debug & NETIF_MSG_PROBE)
1336 1249 printk(KERN_ERR PFX
1337 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && 1250 "Consistent memory allocation failed.\n");
1338 ((cards_found>=MAX_UNITS) || full_duplex[cards_found])) 1251 ret = -ENOMEM;
1339 lp->options |= PCNET32_PORT_FD; 1252 goto err_free_netdev;
1340 1253 }
1341 if (!a) { 1254
1342 if (pcnet32_debug & NETIF_MSG_PROBE) 1255 memset(lp, 0, sizeof(*lp));
1343 printk(KERN_ERR PFX "No access methods\n"); 1256 lp->dma_addr = lp_dma_addr;
1344 ret = -ENODEV; 1257 lp->pci_dev = pdev;
1345 goto err_free_consistent; 1258
1346 } 1259 spin_lock_init(&lp->lock);
1347 lp->a = *a; 1260
1348 1261 SET_MODULE_OWNER(dev);
1349 /* prior to register_netdev, dev->name is not yet correct */ 1262 SET_NETDEV_DEV(dev, &pdev->dev);
1350 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { 1263 dev->priv = lp;
1351 ret = -ENOMEM; 1264 lp->name = chipname;
1352 goto err_free_ring; 1265 lp->shared_irq = shared;
1353 } 1266 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1354 /* detect special T1/E1 WAN card by checking for MAC address */ 1267 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1355 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 1268 lp->tx_mod_mask = lp->tx_ring_size - 1;
1269 lp->rx_mod_mask = lp->rx_ring_size - 1;
1270 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1271 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1272 lp->mii_if.full_duplex = fdx;
1273 lp->mii_if.phy_id_mask = 0x1f;
1274 lp->mii_if.reg_num_mask = 0x1f;
1275 lp->dxsuflo = dxsuflo;
1276 lp->mii = mii;
1277 lp->msg_enable = pcnet32_debug;
1278 if ((cards_found >= MAX_UNITS)
1279 || (options[cards_found] > sizeof(options_mapping)))
1280 lp->options = PCNET32_PORT_ASEL;
1281 else
1282 lp->options = options_mapping[options[cards_found]];
1283 lp->mii_if.dev = dev;
1284 lp->mii_if.mdio_read = mdio_read;
1285 lp->mii_if.mdio_write = mdio_write;
1286
1287 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1288 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1289 lp->options |= PCNET32_PORT_FD;
1290
1291 if (!a) {
1292 if (pcnet32_debug & NETIF_MSG_PROBE)
1293 printk(KERN_ERR PFX "No access methods\n");
1294 ret = -ENODEV;
1295 goto err_free_consistent;
1296 }
1297 lp->a = *a;
1298
1299 /* prior to register_netdev, dev->name is not yet correct */
1300 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1301 ret = -ENOMEM;
1302 goto err_free_ring;
1303 }
1304 /* detect special T1/E1 WAN card by checking for MAC address */
1305 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1356 && dev->dev_addr[2] == 0x75) 1306 && dev->dev_addr[2] == 0x75)
1357 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; 1307 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1358
1359 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1360 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1361 for (i = 0; i < 6; i++)
1362 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1363 lp->init_block.filter[0] = 0x00000000;
1364 lp->init_block.filter[1] = 0x00000000;
1365 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
1366 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
1367
1368 /* switch pcnet32 to 32bit mode */
1369 a->write_bcr(ioaddr, 20, 2);
1370
1371 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1372 init_block)) & 0xffff);
1373 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1374 init_block)) >> 16);
1375
1376 if (pdev) { /* use the IRQ provided by PCI */
1377 dev->irq = pdev->irq;
1378 if (pcnet32_debug & NETIF_MSG_PROBE)
1379 printk(" assigned IRQ %d.\n", dev->irq);
1380 } else {
1381 unsigned long irq_mask = probe_irq_on();
1382 1308
1383 /* 1309 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1384 * To auto-IRQ we enable the initialization-done and DMA error 1310 lp->init_block.tlen_rlen =
1385 * interrupts. For ISA boards we get a DMA error, but VLB and PCI 1311 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1386 * boards will work. 1312 for (i = 0; i < 6; i++)
1387 */ 1313 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1388 /* Trigger an initialization just for the interrupt. */ 1314 lp->init_block.filter[0] = 0x00000000;
1389 a->write_csr (ioaddr, 0, 0x41); 1315 lp->init_block.filter[1] = 0x00000000;
1390 mdelay (1); 1316 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1317 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1318
1319 /* switch pcnet32 to 32bit mode */
1320 a->write_bcr(ioaddr, 20, 2);
1321
1322 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1323 init_block)) & 0xffff);
1324 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1325 init_block)) >> 16);
1326
1327 if (pdev) { /* use the IRQ provided by PCI */
1328 dev->irq = pdev->irq;
1329 if (pcnet32_debug & NETIF_MSG_PROBE)
1330 printk(" assigned IRQ %d.\n", dev->irq);
1331 } else {
1332 unsigned long irq_mask = probe_irq_on();
1333
1334 /*
1335 * To auto-IRQ we enable the initialization-done and DMA error
1336 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1337 * boards will work.
1338 */
1339 /* Trigger an initialization just for the interrupt. */
1340 a->write_csr(ioaddr, 0, 0x41);
1341 mdelay(1);
1342
1343 dev->irq = probe_irq_off(irq_mask);
1344 if (!dev->irq) {
1345 if (pcnet32_debug & NETIF_MSG_PROBE)
1346 printk(", failed to detect IRQ line.\n");
1347 ret = -ENODEV;
1348 goto err_free_ring;
1349 }
1350 if (pcnet32_debug & NETIF_MSG_PROBE)
1351 printk(", probed IRQ %d.\n", dev->irq);
1352 }
1391 1353
1392 dev->irq = probe_irq_off (irq_mask); 1354 /* Set the mii phy_id so that we can query the link state */
1393 if (!dev->irq) { 1355 if (lp->mii) {
1394 if (pcnet32_debug & NETIF_MSG_PROBE) 1356 /* lp->phycount and lp->phymask are set to 0 by memset above */
1395 printk(", failed to detect IRQ line.\n"); 1357
1396 ret = -ENODEV; 1358 lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1397 goto err_free_ring; 1359 /* scan for PHYs */
1360 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1361 unsigned short id1, id2;
1362
1363 id1 = mdio_read(dev, i, MII_PHYSID1);
1364 if (id1 == 0xffff)
1365 continue;
1366 id2 = mdio_read(dev, i, MII_PHYSID2);
1367 if (id2 == 0xffff)
1368 continue;
1369 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1370 continue; /* 79C971 & 79C972 have phantom PHY at id 31 */
1371 lp->phycount++;
1372 lp->phymask |= (1 << i);
1373 lp->mii_if.phy_id = i;
1374 if (pcnet32_debug & NETIF_MSG_PROBE)
1375 printk(KERN_INFO PFX
1376 "Found PHY %04x:%04x at address %d.\n",
1377 id1, id2, i);
1378 }
1379 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1380 if (lp->phycount > 1) {
1381 lp->options |= PCNET32_PORT_MII;
1382 }
1398 } 1383 }
1399 if (pcnet32_debug & NETIF_MSG_PROBE) 1384
1400 printk(", probed IRQ %d.\n", dev->irq); 1385 init_timer(&lp->watchdog_timer);
1401 } 1386 lp->watchdog_timer.data = (unsigned long)dev;
1402 1387 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1403 /* Set the mii phy_id so that we can query the link state */ 1388
1404 if (lp->mii) 1389 /* The PCNET32-specific entries in the device structure. */
1405 lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f; 1390 dev->open = &pcnet32_open;
1406 1391 dev->hard_start_xmit = &pcnet32_start_xmit;
1407 init_timer (&lp->watchdog_timer); 1392 dev->stop = &pcnet32_close;
1408 lp->watchdog_timer.data = (unsigned long) dev; 1393 dev->get_stats = &pcnet32_get_stats;
1409 lp->watchdog_timer.function = (void *) &pcnet32_watchdog; 1394 dev->set_multicast_list = &pcnet32_set_multicast_list;
1410 1395 dev->do_ioctl = &pcnet32_ioctl;
1411 /* The PCNET32-specific entries in the device structure. */ 1396 dev->ethtool_ops = &pcnet32_ethtool_ops;
1412 dev->open = &pcnet32_open; 1397 dev->tx_timeout = pcnet32_tx_timeout;
1413 dev->hard_start_xmit = &pcnet32_start_xmit; 1398 dev->watchdog_timeo = (5 * HZ);
1414 dev->stop = &pcnet32_close;
1415 dev->get_stats = &pcnet32_get_stats;
1416 dev->set_multicast_list = &pcnet32_set_multicast_list;
1417 dev->do_ioctl = &pcnet32_ioctl;
1418 dev->ethtool_ops = &pcnet32_ethtool_ops;
1419 dev->tx_timeout = pcnet32_tx_timeout;
1420 dev->watchdog_timeo = (5*HZ);
1421 1399
1422#ifdef CONFIG_NET_POLL_CONTROLLER 1400#ifdef CONFIG_NET_POLL_CONTROLLER
1423 dev->poll_controller = pcnet32_poll_controller; 1401 dev->poll_controller = pcnet32_poll_controller;
1424#endif 1402#endif
1425 1403
1426 /* Fill in the generic fields of the device structure. */ 1404 /* Fill in the generic fields of the device structure. */
1427 if (register_netdev(dev)) 1405 if (register_netdev(dev))
1428 goto err_free_ring; 1406 goto err_free_ring;
1429 1407
1430 if (pdev) { 1408 if (pdev) {
1431 pci_set_drvdata(pdev, dev); 1409 pci_set_drvdata(pdev, dev);
1432 } else { 1410 } else {
1433 lp->next = pcnet32_dev; 1411 lp->next = pcnet32_dev;
1434 pcnet32_dev = dev; 1412 pcnet32_dev = dev;
1435 } 1413 }
1436
1437 if (pcnet32_debug & NETIF_MSG_PROBE)
1438 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1439 cards_found++;
1440
1441 /* enable LED writes */
1442 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1443
1444 return 0;
1445
1446err_free_ring:
1447 pcnet32_free_ring(dev);
1448err_free_consistent:
1449 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1450err_free_netdev:
1451 free_netdev(dev);
1452err_release_region:
1453 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1454 return ret;
1455}
1456 1414
1415 if (pcnet32_debug & NETIF_MSG_PROBE)
1416 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1417 cards_found++;
1418
1419 /* enable LED writes */
1420 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1421
1422 return 0;
1423
1424 err_free_ring:
1425 pcnet32_free_ring(dev);
1426 err_free_consistent:
1427 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1428 err_free_netdev:
1429 free_netdev(dev);
1430 err_release_region:
1431 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1432 return ret;
1433}
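
The substantive addition to probe1 is the MII scan: every one of the 32 PHY addresses is probed through its ID registers, responders are accumulated into lp->phymask and lp->phycount, and address 31 is skipped on the 79C971/79C972, which mirror a phantom PHY there. A standalone sketch of the scan; mdio_id() fakes mdio_read(), and an absent PHY reads back as 0xffff:

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS 32

/* Fake MDIO: PHYs at addresses 3 and 31 answer; the rest float high. */
static uint16_t mdio_id(int phy)
{
        return (phy == 3 || phy == 31) ? 0x0022 : 0xffff;
}

int main(void)
{
        uint32_t phymask = 0;
        int phycount = 0, i;
        int phantom31 = 1;      /* true for 79C971/79C972 chip versions */

        for (i = 0; i < MAX_PHYS; i++) {
                if (mdio_id(i) == 0xffff)
                        continue;               /* nobody home */
                if (i == 31 && phantom31)
                        continue;               /* mirrored phantom PHY */
                phymask |= 1u << i;             /* remembered for the dump */
                phycount++;
        }
        printf("%d PHY(s), mask %#x\n", phycount, (unsigned)phymask);
        return 0;
}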
1457 1434
1458/* if any allocation fails, caller must also call pcnet32_free_ring */ 1435/* if any allocation fails, caller must also call pcnet32_free_ring */
1459static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1436static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1460{ 1437{
1461 struct pcnet32_private *lp = dev->priv; 1438 struct pcnet32_private *lp = dev->priv;
1462 1439
1463 lp->tx_ring = pci_alloc_consistent(lp->pci_dev, 1440 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1464 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, 1441 sizeof(struct pcnet32_tx_head) *
1465 &lp->tx_ring_dma_addr); 1442 lp->tx_ring_size,
1466 if (lp->tx_ring == NULL) { 1443 &lp->tx_ring_dma_addr);
1467 if (pcnet32_debug & NETIF_MSG_DRV) 1444 if (lp->tx_ring == NULL) {
1468 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", 1445 if (pcnet32_debug & NETIF_MSG_DRV)
1469 name); 1446 printk("\n" KERN_ERR PFX
1470 return -ENOMEM; 1447 "%s: Consistent memory allocation failed.\n",
1471 } 1448 name);
1472 1449 return -ENOMEM;
1473 lp->rx_ring = pci_alloc_consistent(lp->pci_dev, 1450 }
1474 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
1475 &lp->rx_ring_dma_addr);
1476 if (lp->rx_ring == NULL) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
1479 name);
1480 return -ENOMEM;
1481 }
1482
1483 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1484 GFP_ATOMIC);
1485 if (!lp->tx_dma_addr) {
1486 if (pcnet32_debug & NETIF_MSG_DRV)
1487 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1488 return -ENOMEM;
1489 }
1490 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1491
1492 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1493 GFP_ATOMIC);
1494 if (!lp->rx_dma_addr) {
1495 if (pcnet32_debug & NETIF_MSG_DRV)
1496 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1497 return -ENOMEM;
1498 }
1499 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1500
1501 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1502 GFP_ATOMIC);
1503 if (!lp->tx_skbuff) {
1504 if (pcnet32_debug & NETIF_MSG_DRV)
1505 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1506 return -ENOMEM;
1507 }
1508 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1509
1510 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1511 GFP_ATOMIC);
1512 if (!lp->rx_skbuff) {
1513 if (pcnet32_debug & NETIF_MSG_DRV)
1514 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1515 return -ENOMEM;
1516 }
1517 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1518 1451
1519 return 0; 1452 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1520} 1453 sizeof(struct pcnet32_rx_head) *
1454 lp->rx_ring_size,
1455 &lp->rx_ring_dma_addr);
1456 if (lp->rx_ring == NULL) {
1457 if (pcnet32_debug & NETIF_MSG_DRV)
1458 printk("\n" KERN_ERR PFX
1459 "%s: Consistent memory allocation failed.\n",
1460 name);
1461 return -ENOMEM;
1462 }
1521 1463
1464 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1465 GFP_ATOMIC);
1466 if (!lp->tx_dma_addr) {
1467 if (pcnet32_debug & NETIF_MSG_DRV)
1468 printk("\n" KERN_ERR PFX
1469 "%s: Memory allocation failed.\n", name);
1470 return -ENOMEM;
1471 }
1472 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1473
1474 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1475 GFP_ATOMIC);
1476 if (!lp->rx_dma_addr) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk("\n" KERN_ERR PFX
1479 "%s: Memory allocation failed.\n", name);
1480 return -ENOMEM;
1481 }
1482 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1483
1484 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1485 GFP_ATOMIC);
1486 if (!lp->tx_skbuff) {
1487 if (pcnet32_debug & NETIF_MSG_DRV)
1488 printk("\n" KERN_ERR PFX
1489 "%s: Memory allocation failed.\n", name);
1490 return -ENOMEM;
1491 }
1492 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1493
1494 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1495 GFP_ATOMIC);
1496 if (!lp->rx_skbuff) {
1497 if (pcnet32_debug & NETIF_MSG_DRV)
1498 printk("\n" KERN_ERR PFX
1499 "%s: Memory allocation failed.\n", name);
1500 return -ENOMEM;
1501 }
1502 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1503
1504 return 0;
1505}
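
Note the error-handling contract restated in the comment above pcnet32_alloc_ring(): the function bails out at the first failing allocation without unwinding, and the caller is expected to invoke pcnet32_free_ring(), which therefore has to tolerate a half-built state (kfree(NULL) is a no-op and the ring pointers are checked before the consistent buffers are released). The same pairing sketched in plain C, with calloc()/free() standing in for the PCI and slab allocators:

#include <stdlib.h>

struct rings { void *tx_ring, *rx_ring, *tx_dma, *rx_dma; };

/* Safe on partial state: free(NULL) is a no-op, pointers get re-zeroed. */
static void free_rings(struct rings *r)
{
        free(r->tx_dma);  r->tx_dma  = NULL;
        free(r->rx_dma);  r->rx_dma  = NULL;
        free(r->tx_ring); r->tx_ring = NULL;
        free(r->rx_ring); r->rx_ring = NULL;
}

/* Bail out at the first failure; the caller must call free_rings(). */
static int alloc_rings(struct rings *r, size_t n)
{
        if (!(r->tx_ring = calloc(n, 16)) ||
            !(r->rx_ring = calloc(n, 16)) ||
            !(r->tx_dma  = calloc(n, sizeof(void *))) ||
            !(r->rx_dma  = calloc(n, sizeof(void *))))
                return -1;
        return 0;
}

int main(void)
{
        struct rings r = { 0, 0, 0, 0 };

        (void)alloc_rings(&r, 32);     /* success or not... */
        free_rings(&r);                /* ...this is always safe */
        return 0;
}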
1522 1506
1523static void pcnet32_free_ring(struct net_device *dev) 1507static void pcnet32_free_ring(struct net_device *dev)
1524{ 1508{
1525 struct pcnet32_private *lp = dev->priv; 1509 struct pcnet32_private *lp = dev->priv;
1526 1510
1527 kfree(lp->tx_skbuff); 1511 kfree(lp->tx_skbuff);
1528 lp->tx_skbuff = NULL; 1512 lp->tx_skbuff = NULL;
1529 1513
1530 kfree(lp->rx_skbuff); 1514 kfree(lp->rx_skbuff);
1531 lp->rx_skbuff = NULL; 1515 lp->rx_skbuff = NULL;
1532 1516
1533 kfree(lp->tx_dma_addr); 1517 kfree(lp->tx_dma_addr);
1534 lp->tx_dma_addr = NULL; 1518 lp->tx_dma_addr = NULL;
1535 1519
1536 kfree(lp->rx_dma_addr); 1520 kfree(lp->rx_dma_addr);
1537 lp->rx_dma_addr = NULL; 1521 lp->rx_dma_addr = NULL;
1538 1522
1539 if (lp->tx_ring) { 1523 if (lp->tx_ring) {
1540 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, 1524 pci_free_consistent(lp->pci_dev,
1541 lp->tx_ring, lp->tx_ring_dma_addr); 1525 sizeof(struct pcnet32_tx_head) *
1542 lp->tx_ring = NULL; 1526 lp->tx_ring_size, lp->tx_ring,
1543 } 1527 lp->tx_ring_dma_addr);
1528 lp->tx_ring = NULL;
1529 }
1544 1530
1545 if (lp->rx_ring) { 1531 if (lp->rx_ring) {
1546 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, 1532 pci_free_consistent(lp->pci_dev,
1547 lp->rx_ring, lp->rx_ring_dma_addr); 1533 sizeof(struct pcnet32_rx_head) *
1548 lp->rx_ring = NULL; 1534 lp->rx_ring_size, lp->rx_ring,
1549 } 1535 lp->rx_ring_dma_addr);
1536 lp->rx_ring = NULL;
1537 }
1550} 1538}
1551 1539
1552 1540static int pcnet32_open(struct net_device *dev)
1553static int
1554pcnet32_open(struct net_device *dev)
1555{ 1541{
1556 struct pcnet32_private *lp = dev->priv; 1542 struct pcnet32_private *lp = dev->priv;
1557 unsigned long ioaddr = dev->base_addr; 1543 unsigned long ioaddr = dev->base_addr;
1558 u16 val; 1544 u16 val;
1559 int i; 1545 int i;
1560 int rc; 1546 int rc;
1561 unsigned long flags; 1547 unsigned long flags;
1562 1548
1563 if (request_irq(dev->irq, &pcnet32_interrupt, 1549 if (request_irq(dev->irq, &pcnet32_interrupt,
1564 lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) { 1550 lp->shared_irq ? SA_SHIRQ : 0, dev->name,
1565 return -EAGAIN; 1551 (void *)dev)) {
1566 } 1552 return -EAGAIN;
1567 1553 }
1568 spin_lock_irqsave(&lp->lock, flags); 1554
1569 /* Check for a valid station address */ 1555 spin_lock_irqsave(&lp->lock, flags);
1570 if (!is_valid_ether_addr(dev->dev_addr)) { 1556 /* Check for a valid station address */
1571 rc = -EINVAL; 1557 if (!is_valid_ether_addr(dev->dev_addr)) {
1572 goto err_free_irq; 1558 rc = -EINVAL;
1573 } 1559 goto err_free_irq;
1574 1560 }
1575 /* Reset the PCNET32 */ 1561
1576 lp->a.reset (ioaddr); 1562 /* Reset the PCNET32 */
1577 1563 lp->a.reset(ioaddr);
1578 /* switch pcnet32 to 32bit mode */ 1564
1579 lp->a.write_bcr (ioaddr, 20, 2); 1565 /* switch pcnet32 to 32bit mode */
1580 1566 lp->a.write_bcr(ioaddr, 20, 2);
1581 if (netif_msg_ifup(lp)) 1567
1582 printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", 1568 if (netif_msg_ifup(lp))
1583 dev->name, dev->irq, 1569 printk(KERN_DEBUG
1584 (u32) (lp->tx_ring_dma_addr), 1570 "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
1585 (u32) (lp->rx_ring_dma_addr), 1571 dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
1586 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); 1572 (u32) (lp->rx_ring_dma_addr),
1587 1573 (u32) (lp->dma_addr +
1588 /* set/reset autoselect bit */ 1574 offsetof(struct pcnet32_private, init_block)));
1589 val = lp->a.read_bcr (ioaddr, 2) & ~2; 1575
1590 if (lp->options & PCNET32_PORT_ASEL) 1576 /* set/reset autoselect bit */
1591 val |= 2; 1577 val = lp->a.read_bcr(ioaddr, 2) & ~2;
1592 lp->a.write_bcr (ioaddr, 2, val); 1578 if (lp->options & PCNET32_PORT_ASEL)
1593
1594 /* handle full duplex setting */
1595 if (lp->mii_if.full_duplex) {
1596 val = lp->a.read_bcr (ioaddr, 9) & ~3;
1597 if (lp->options & PCNET32_PORT_FD) {
1598 val |= 1;
1599 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1600 val |= 2; 1579 val |= 2;
1601 } else if (lp->options & PCNET32_PORT_ASEL) { 1580 lp->a.write_bcr(ioaddr, 2, val);
1602 /* workaround for the xSeries 250, turn on for 79C975 only */ 1581
1603 i = ((lp->a.read_csr(ioaddr, 88) | 1582 /* handle full duplex setting */
1604 (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff; 1583 if (lp->mii_if.full_duplex) {
1605 if (i == 0x2627) 1584 val = lp->a.read_bcr(ioaddr, 9) & ~3;
1606 val |= 3; 1585 if (lp->options & PCNET32_PORT_FD) {
1607 } 1586 val |= 1;
1608 lp->a.write_bcr (ioaddr, 9, val); 1587 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1609 } 1588 val |= 2;
1610 1589 } else if (lp->options & PCNET32_PORT_ASEL) {
1611 /* set/reset GPSI bit in test register */ 1590 /* workaround for the xSeries 250, turn on for 79C975 only */
1612 val = lp->a.read_csr (ioaddr, 124) & ~0x10; 1591 i = ((lp->a.read_csr(ioaddr, 88) |
1613 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) 1592 (lp->a.
1614 val |= 0x10; 1593 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1615 lp->a.write_csr (ioaddr, 124, val); 1594 if (i == 0x2627)
1616 1595 val |= 3;
1617 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 1596 }
1618 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 1597 lp->a.write_bcr(ioaddr, 9, val);
1598 }
1599
1600 /* set/reset GPSI bit in test register */
1601 val = lp->a.read_csr(ioaddr, 124) & ~0x10;
1602 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
1603 val |= 0x10;
1604 lp->a.write_csr(ioaddr, 124, val);
1605
1606 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
1607 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
1619 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 1608 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
1620 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 1609 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
1621 if (lp->options & PCNET32_PORT_ASEL) { 1610 if (lp->options & PCNET32_PORT_ASEL) {
1622 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 1611 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
1623 if (netif_msg_link(lp)) 1612 if (netif_msg_link(lp))
1624 printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n", 1613 printk(KERN_DEBUG
1625 dev->name); 1614 "%s: Setting 100Mb-Full Duplex.\n",
1626 } 1615 dev->name);
1627 } 1616 }
1628 { 1617 }
1629 /* 1618 if (lp->phycount < 2) {
1630 * 24 Jun 2004 according to AMD, in order to change the PHY, 1619 /*
1631 * DANAS (or DISPM for 79C976) must be set; then select the speed, 1620 * 24 Jun 2004 according to AMD, in order to change the PHY,
1632 * duplex, and/or enable auto negotiation, and clear DANAS 1621 * DANAS (or DISPM for 79C976) must be set; then select the speed,
1633 */ 1622 * duplex, and/or enable auto negotiation, and clear DANAS
1634 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { 1623 */
1635 lp->a.write_bcr(ioaddr, 32, 1624 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
1636 lp->a.read_bcr(ioaddr, 32) | 0x0080); 1625 lp->a.write_bcr(ioaddr, 32,
1637 /* disable Auto Negotiation, set 10Mbps, HD */ 1626 lp->a.read_bcr(ioaddr, 32) | 0x0080);
1638 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; 1627 /* disable Auto Negotiation, set 10Mbps, HD */
1639 if (lp->options & PCNET32_PORT_FD) 1628 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
1640 val |= 0x10; 1629 if (lp->options & PCNET32_PORT_FD)
1641 if (lp->options & PCNET32_PORT_100) 1630 val |= 0x10;
1642 val |= 0x08; 1631 if (lp->options & PCNET32_PORT_100)
1643 lp->a.write_bcr (ioaddr, 32, val); 1632 val |= 0x08;
1633 lp->a.write_bcr(ioaddr, 32, val);
1634 } else {
1635 if (lp->options & PCNET32_PORT_ASEL) {
1636 lp->a.write_bcr(ioaddr, 32,
1637 lp->a.read_bcr(ioaddr,
1638 32) | 0x0080);
1639 /* enable auto negotiate, setup, disable fd */
1640 val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
1641 val |= 0x20;
1642 lp->a.write_bcr(ioaddr, 32, val);
1643 }
1644 }
1644 } else { 1645 } else {
1645 if (lp->options & PCNET32_PORT_ASEL) { 1646 int first_phy = -1;
1646 lp->a.write_bcr(ioaddr, 32, 1647 u16 bmcr;
1647 lp->a.read_bcr(ioaddr, 32) | 0x0080); 1648 u32 bcr9;
1648 /* enable auto negotiate, setup, disable fd */ 1649 struct ethtool_cmd ecmd;
1649 val = lp->a.read_bcr(ioaddr, 32) & ~0x98; 1650
1650 val |= 0x20; 1651 /*
1651 lp->a.write_bcr(ioaddr, 32, val); 1652 * There is really no good other way to handle multiple PHYs
1652 } 1653 * other than turning off all automatics
1654 */
1655 val = lp->a.read_bcr(ioaddr, 2);
1656 lp->a.write_bcr(ioaddr, 2, val & ~2);
1657 val = lp->a.read_bcr(ioaddr, 32);
1658 lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
1659
1660 if (!(lp->options & PCNET32_PORT_ASEL)) {
1661 /* setup ecmd */
1662 ecmd.port = PORT_MII;
1663 ecmd.transceiver = XCVR_INTERNAL;
1664 ecmd.autoneg = AUTONEG_DISABLE;
1665 ecmd.speed =
1666 lp->
1667 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
1668 bcr9 = lp->a.read_bcr(ioaddr, 9);
1669
1670 if (lp->options & PCNET32_PORT_FD) {
1671 ecmd.duplex = DUPLEX_FULL;
1672 bcr9 |= (1 << 0);
1673 } else {
1674 ecmd.duplex = DUPLEX_HALF;
1675 bcr9 &= ~(1 << 0);
1676 }
1677 lp->a.write_bcr(ioaddr, 9, bcr9);
1678 }
1679
1680 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1681 if (lp->phymask & (1 << i)) {
1682 /* isolate all but the first PHY */
1683 bmcr = mdio_read(dev, i, MII_BMCR);
1684 if (first_phy == -1) {
1685 first_phy = i;
1686 mdio_write(dev, i, MII_BMCR,
1687 bmcr & ~BMCR_ISOLATE);
1688 } else {
1689 mdio_write(dev, i, MII_BMCR,
1690 bmcr | BMCR_ISOLATE);
1691 }
1692 /* use mii_ethtool_sset to setup PHY */
1693 lp->mii_if.phy_id = i;
1694 ecmd.phy_address = i;
1695 if (lp->options & PCNET32_PORT_ASEL) {
1696 mii_ethtool_gset(&lp->mii_if, &ecmd);
1697 ecmd.autoneg = AUTONEG_ENABLE;
1698 }
1699 mii_ethtool_sset(&lp->mii_if, &ecmd);
1700 }
1701 }
1702 lp->mii_if.phy_id = first_phy;
1703 if (netif_msg_link(lp))
1704 printk(KERN_INFO "%s: Using PHY number %d.\n",
1705 dev->name, first_phy);
1653 } 1706 }
1654 }
1655 1707
1656#ifdef DO_DXSUFLO 1708#ifdef DO_DXSUFLO
1657 if (lp->dxsuflo) { /* Disable transmit stop on underflow */ 1709 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
1658 val = lp->a.read_csr (ioaddr, 3); 1710 val = lp->a.read_csr(ioaddr, 3);
1659 val |= 0x40; 1711 val |= 0x40;
1660 lp->a.write_csr (ioaddr, 3, val); 1712 lp->a.write_csr(ioaddr, 3, val);
1661 } 1713 }
1662#endif 1714#endif
1663 1715
1664 lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 1716 lp->init_block.mode =
1665 pcnet32_load_multicast(dev); 1717 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
1666 1718 pcnet32_load_multicast(dev);
1667 if (pcnet32_init_ring(dev)) { 1719
1668 rc = -ENOMEM; 1720 if (pcnet32_init_ring(dev)) {
1669 goto err_free_ring; 1721 rc = -ENOMEM;
1670 } 1722 goto err_free_ring;
1671 1723 }
1672 /* Re-initialize the PCNET32, and start it when done. */ 1724
1673 lp->a.write_csr (ioaddr, 1, (lp->dma_addr + 1725 /* Re-initialize the PCNET32, and start it when done. */
1674 offsetof(struct pcnet32_private, init_block)) & 0xffff); 1726 lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
1675 lp->a.write_csr (ioaddr, 2, (lp->dma_addr + 1727 offsetof(struct pcnet32_private,
1676 offsetof(struct pcnet32_private, init_block)) >> 16); 1728 init_block)) & 0xffff);
1677 1729 lp->a.write_csr(ioaddr, 2,
1678 lp->a.write_csr (ioaddr, 4, 0x0915); 1730 (lp->dma_addr +
1679 lp->a.write_csr (ioaddr, 0, 0x0001); 1731 offsetof(struct pcnet32_private, init_block)) >> 16);
1680 1732
1681 netif_start_queue(dev); 1733 lp->a.write_csr(ioaddr, 4, 0x0915);
1682 1734 lp->a.write_csr(ioaddr, 0, 0x0001);
1683 /* If we have mii, print the link status and start the watchdog */ 1735
1684 if (lp->mii) { 1736 netif_start_queue(dev);
1685 mii_check_media (&lp->mii_if, netif_msg_link(lp), 1); 1737
1686 mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 1738 /* Print the link status and start the watchdog */
1687 } 1739 pcnet32_check_media(dev, 1);
1688 1740 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
1689 i = 0; 1741
1690 while (i++ < 100) 1742 i = 0;
1691 if (lp->a.read_csr (ioaddr, 0) & 0x0100) 1743 while (i++ < 100)
1692 break; 1744 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1693 /* 1745 break;
1694 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton 1746 /*
1695 * reports that doing so triggers a bug in the '974. 1747 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
1696 */ 1748 * reports that doing so triggers a bug in the '974.
1697 lp->a.write_csr (ioaddr, 0, 0x0042); 1749 */
1698 1750 lp->a.write_csr(ioaddr, 0, 0x0042);
1699 if (netif_msg_ifup(lp)) 1751
1700 printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", 1752 if (netif_msg_ifup(lp))
1701 dev->name, i, (u32) (lp->dma_addr + 1753 printk(KERN_DEBUG
1702 offsetof(struct pcnet32_private, init_block)), 1754 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
1703 lp->a.read_csr(ioaddr, 0)); 1755 dev->name, i,
1704 1756 (u32) (lp->dma_addr +
1705 spin_unlock_irqrestore(&lp->lock, flags); 1757 offsetof(struct pcnet32_private, init_block)),
1706 1758 lp->a.read_csr(ioaddr, 0));
1707 return 0; /* Always succeed */ 1759
1708 1760 spin_unlock_irqrestore(&lp->lock, flags);
1709err_free_ring: 1761
1710 /* free any allocated skbuffs */ 1762 return 0; /* Always succeed */
1711 for (i = 0; i < lp->rx_ring_size; i++) { 1763
1712 lp->rx_ring[i].status = 0; 1764 err_free_ring:
1713 if (lp->rx_skbuff[i]) { 1765 /* free any allocated skbuffs */
1714 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, 1766 for (i = 0; i < lp->rx_ring_size; i++) {
1715 PCI_DMA_FROMDEVICE); 1767 lp->rx_ring[i].status = 0;
1716 dev_kfree_skb(lp->rx_skbuff[i]); 1768 if (lp->rx_skbuff[i]) {
1717 } 1769 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
1718 lp->rx_skbuff[i] = NULL; 1770 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1719 lp->rx_dma_addr[i] = 0; 1771 dev_kfree_skb(lp->rx_skbuff[i]);
1720 } 1772 }
1721 1773 lp->rx_skbuff[i] = NULL;
1722 pcnet32_free_ring(dev); 1774 lp->rx_dma_addr[i] = 0;
1723 1775 }
1724 /* 1776
1725 * Switch back to 16bit mode to avoid problems with a dumb 1777 pcnet32_free_ring(dev);
1726 * DOS packet driver after a warm reboot 1778
1727 */ 1779 /*
1728 lp->a.write_bcr (ioaddr, 20, 4); 1780 * Switch back to 16bit mode to avoid problems with a dumb
1729 1781 * DOS packet driver after a warm reboot
1730err_free_irq: 1782 */
1731 spin_unlock_irqrestore(&lp->lock, flags); 1783 lp->a.write_bcr(ioaddr, 20, 4);
1732 free_irq(dev->irq, dev); 1784
1733 return rc; 1785 err_free_irq:
1786 spin_unlock_irqrestore(&lp->lock, flags);
1787 free_irq(dev->irq, dev);
1788 return rc;
1734} 1789}
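
For the multi-PHY case, pcnet32_open() turns the automatics off and then manually isolates every PHY except the first responder by setting BMCR_ISOLATE in each parked PHY's control register. A standalone sketch of that selection; the MDIO register file is faked, and BMCR_ISOLATE is the standard MII value (0x0400):

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS     32
#define BMCR_ISOLATE 0x0400   /* standard MII: detach PHY from the bus */

static uint16_t bmcr[MAX_PHYS];                      /* faked register file */
static uint16_t rd(int phy)             { return bmcr[phy]; }
static void     wr(int phy, uint16_t v) { bmcr[phy] = v; }

/* Keep the first responding PHY active and isolate the rest, the way
 * the multi-PHY branch of pcnet32_open() does. */
static int pick_first_phy(uint32_t phymask)
{
        int i, first = -1;

        for (i = 0; i < MAX_PHYS; i++) {
                if (!(phymask & (1u << i)))
                        continue;
                if (first < 0) {
                        first = i;
                        wr(i, rd(i) & ~BMCR_ISOLATE);   /* stays on the bus */
                } else {
                        wr(i, rd(i) | BMCR_ISOLATE);    /* parked */
                }
        }
        return first;
}

int main(void)
{
        printf("active PHY: %d\n", pick_first_phy((1u << 3) | (1u << 7)));
        return 0;
}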
1735 1790
1736/* 1791/*
@@ -1746,727 +1801,893 @@ err_free_irq:
1746 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com 1801 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
1747 */ 1802 */
1748 1803
1749static void 1804static void pcnet32_purge_tx_ring(struct net_device *dev)
1750pcnet32_purge_tx_ring(struct net_device *dev)
1751{ 1805{
1752 struct pcnet32_private *lp = dev->priv; 1806 struct pcnet32_private *lp = dev->priv;
1753 int i; 1807 int i;
1754
1755 for (i = 0; i < lp->tx_ring_size; i++) {
1756 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1757 wmb(); /* Make sure adapter sees owner change */
1758 if (lp->tx_skbuff[i]) {
1759 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
1760 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1761 dev_kfree_skb_any(lp->tx_skbuff[i]);
1762 }
1763 lp->tx_skbuff[i] = NULL;
1764 lp->tx_dma_addr[i] = 0;
1765 }
1766}
1767 1808
1809 for (i = 0; i < lp->tx_ring_size; i++) {
1810 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1811 wmb(); /* Make sure adapter sees owner change */
1812 if (lp->tx_skbuff[i]) {
1813 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
1814 lp->tx_skbuff[i]->len,
1815 PCI_DMA_TODEVICE);
1816 dev_kfree_skb_any(lp->tx_skbuff[i]);
1817 }
1818 lp->tx_skbuff[i] = NULL;
1819 lp->tx_dma_addr[i] = 0;
1820 }
1821}
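
The purge loop hands each descriptor back to the CPU by clearing its status word and only then recycles the buffer, with a write barrier in between so the adapter cannot observe the mapping being torn down while it still appears to own the descriptor. A minimal sketch of that ordering; __sync_synchronize() is a stand-in for the kernel's wmb():

#include <stdint.h>

struct tx_desc { uint32_t base; int16_t length; uint16_t status; };

#define OWN_BIT 0x8000                 /* set: the adapter owns the slot */
#define wmb()   __sync_synchronize()   /* stand-in for the kernel's wmb() */

/* Reclaim every Tx descriptor for the CPU before a restart: clear the
 * status word (and with it OWN) first, fence, then drop the buffer. */
static void purge(struct tx_desc *ring, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                ring[i].status = 0;    /* CPU owns the buffer again */
                wmb();                 /* adapter must see that first */
                ring[i].base = 0;      /* now safe to unmap/free */
        }
}

int main(void)
{
        struct tx_desc ring[4] = { { 0, 0, OWN_BIT } };

        purge(ring, 4);
        return ring[0].status;         /* 0: everything reclaimed */
}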
1768 1822
1769/* Initialize the PCNET32 Rx and Tx rings. */ 1823/* Initialize the PCNET32 Rx and Tx rings. */
1770static int 1824static int pcnet32_init_ring(struct net_device *dev)
1771pcnet32_init_ring(struct net_device *dev)
1772{ 1825{
1773 struct pcnet32_private *lp = dev->priv; 1826 struct pcnet32_private *lp = dev->priv;
1774 int i; 1827 int i;
1775 1828
1776 lp->tx_full = 0; 1829 lp->tx_full = 0;
1777 lp->cur_rx = lp->cur_tx = 0; 1830 lp->cur_rx = lp->cur_tx = 0;
1778 lp->dirty_rx = lp->dirty_tx = 0; 1831 lp->dirty_rx = lp->dirty_tx = 0;
1779 1832
1780 for (i = 0; i < lp->rx_ring_size; i++) { 1833 for (i = 0; i < lp->rx_ring_size; i++) {
1781 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; 1834 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
1782 if (rx_skbuff == NULL) { 1835 if (rx_skbuff == NULL) {
1783 if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { 1836 if (!
1784 /* there is not much we can do at this point */ 1837 (rx_skbuff = lp->rx_skbuff[i] =
1785 if (pcnet32_debug & NETIF_MSG_DRV) 1838 dev_alloc_skb(PKT_BUF_SZ))) {
1786 printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", 1839 /* there is not much we can do at this point */
1787 dev->name); 1840 if (pcnet32_debug & NETIF_MSG_DRV)
1788 return -1; 1841 printk(KERN_ERR
1789 } 1842 "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
1790 skb_reserve (rx_skbuff, 2); 1843 dev->name);
1791 } 1844 return -1;
1792 1845 }
1793 rmb(); 1846 skb_reserve(rx_skbuff, 2);
1794 if (lp->rx_dma_addr[i] == 0) 1847 }
1795 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, 1848
1796 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); 1849 rmb();
1797 lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); 1850 if (lp->rx_dma_addr[i] == 0)
1798 lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ); 1851 lp->rx_dma_addr[i] =
1799 wmb(); /* Make sure owner changes after all others are visible */ 1852 pci_map_single(lp->pci_dev, rx_skbuff->data,
1800 lp->rx_ring[i].status = le16_to_cpu(0x8000); 1853 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1801 } 1854 lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
1802 /* The Tx buffer address is filled in as needed, but we do need to clear 1855 lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
1803 * the upper ownership bit. */ 1856 wmb(); /* Make sure owner changes after all others are visible */
1804 for (i = 0; i < lp->tx_ring_size; i++) { 1857 lp->rx_ring[i].status = le16_to_cpu(0x8000);
1805 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 1858 }
1806 wmb(); /* Make sure adapter sees owner change */ 1859 /* The Tx buffer address is filled in as needed, but we do need to clear
1807 lp->tx_ring[i].base = 0; 1860 * the upper ownership bit. */
1808 lp->tx_dma_addr[i] = 0; 1861 for (i = 0; i < lp->tx_ring_size; i++) {
1809 } 1862 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1810 1863 wmb(); /* Make sure adapter sees owner change */
1811 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); 1864 lp->tx_ring[i].base = 0;
1812 for (i = 0; i < 6; i++) 1865 lp->tx_dma_addr[i] = 0;
1813 lp->init_block.phys_addr[i] = dev->dev_addr[i]; 1866 }
1814 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); 1867
1815 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); 1868 lp->init_block.tlen_rlen =
1816 wmb(); /* Make sure all changes are visible */ 1869 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1817 return 0; 1870 for (i = 0; i < 6; i++)
1871 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1872 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1873 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1874 wmb(); /* Make sure all changes are visible */
1875 return 0;
1818} 1876}
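
A detail worth calling out in the ring setup: the Rx descriptor's buf_length field holds the two's complement of the usable buffer size, which is why the code stores 2 - PKT_BUF_SZ (skb_reserve() already consumed 2 bytes) and why the ring dump in pcnet32_tx_timeout() negates it back. A standalone illustration; the PKT_BUF_SZ value is assumed:

#include <stdint.h>
#include <stdio.h>

#define PKT_BUF_SZ 1544        /* the driver's Rx buffer size (assumed) */

int main(void)
{
        /* The descriptor stores the negated usable length; skb_reserve()
         * already consumed 2 bytes, hence "2 - PKT_BUF_SZ". */
        uint16_t buf_length = (uint16_t)(2 - PKT_BUF_SZ);

        /* pcnet32_tx_timeout()'s ring dump recovers it the same way: */
        unsigned size = (unsigned)(-(int)buf_length) & 0xffff;

        printf("stored %#06x -> %u usable bytes\n", (unsigned)buf_length, size);
        return 0;
}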
1819 1877
1820/* the pcnet32 has been issued a stop or reset. Wait for the stop bit 1878/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
1821 * then flush the pending transmit operations, re-initialize the ring, 1879 * then flush the pending transmit operations, re-initialize the ring,
1822 * and tell the chip to initialize. 1880 * and tell the chip to initialize.
1823 */ 1881 */
1824static void 1882static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1825pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1826{ 1883{
1827 struct pcnet32_private *lp = dev->priv; 1884 struct pcnet32_private *lp = dev->priv;
1828 unsigned long ioaddr = dev->base_addr; 1885 unsigned long ioaddr = dev->base_addr;
1829 int i; 1886 int i;
1830 1887
1831 /* wait for stop */ 1888 /* wait for stop */
1832 for (i=0; i<100; i++) 1889 for (i = 0; i < 100; i++)
1833 if (lp->a.read_csr(ioaddr, 0) & 0x0004) 1890 if (lp->a.read_csr(ioaddr, 0) & 0x0004)
1834 break; 1891 break;
1835 1892
1836 if (i >= 100 && netif_msg_drv(lp)) 1893 if (i >= 100 && netif_msg_drv(lp))
1837 printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n", 1894 printk(KERN_ERR
1838 dev->name); 1895 "%s: pcnet32_restart timed out waiting for stop.\n",
1896 dev->name);
1839 1897
1840 pcnet32_purge_tx_ring(dev); 1898 pcnet32_purge_tx_ring(dev);
1841 if (pcnet32_init_ring(dev)) 1899 if (pcnet32_init_ring(dev))
1842 return; 1900 return;
1843 1901
1844 /* ReInit Ring */ 1902 /* ReInit Ring */
1845 lp->a.write_csr (ioaddr, 0, 1); 1903 lp->a.write_csr(ioaddr, 0, 1);
1846 i = 0; 1904 i = 0;
1847 while (i++ < 1000) 1905 while (i++ < 1000)
1848 if (lp->a.read_csr (ioaddr, 0) & 0x0100) 1906 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1849 break; 1907 break;
1850 1908
1851 lp->a.write_csr (ioaddr, 0, csr0_bits); 1909 lp->a.write_csr(ioaddr, 0, csr0_bits);
1852} 1910}
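
The stop-bit wait in pcnet32_restart() is the driver's standard pattern for hardware handshakes: poll a CSR bit a bounded number of times, then report a timeout instead of hanging. A standalone sketch of that loop shape, with a stubbed register read in place of lp->a.read_csr():

#include <stdio.h>

/* Stub for the CSR0 read; pretends the STOP bit (0x0004) shows
 * up on the third poll. */
static unsigned int read_csr0(void)
{
	static int calls;
	return ++calls >= 3 ? 0x0004 : 0x0000;
}

int main(void)
{
	int i;

	/* Bounded busy-wait, mirroring the loop in pcnet32_restart(). */
	for (i = 0; i < 100; i++)
		if (read_csr0() & 0x0004)
			break;

	if (i >= 100)
		puts("timed out waiting for stop");
	else
		printf("stop bit seen after %d polls\n", i + 1);
	return 0;
}
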
1853 1911
1854 1912static void pcnet32_tx_timeout(struct net_device *dev)
1855static void
1856pcnet32_tx_timeout (struct net_device *dev)
1857{ 1913{
1858 struct pcnet32_private *lp = dev->priv; 1914 struct pcnet32_private *lp = dev->priv;
1859 unsigned long ioaddr = dev->base_addr, flags; 1915 unsigned long ioaddr = dev->base_addr, flags;
1860 1916
1861 spin_lock_irqsave(&lp->lock, flags); 1917 spin_lock_irqsave(&lp->lock, flags);
1862 /* Transmitter timeout, serious problems. */ 1918 /* Transmitter timeout, serious problems. */
1863 if (pcnet32_debug & NETIF_MSG_DRV) 1919 if (pcnet32_debug & NETIF_MSG_DRV)
1864 printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", 1920 printk(KERN_ERR
1865 dev->name, lp->a.read_csr(ioaddr, 0)); 1921 "%s: transmit timed out, status %4.4x, resetting.\n",
1866 lp->a.write_csr (ioaddr, 0, 0x0004); 1922 dev->name, lp->a.read_csr(ioaddr, 0));
1867 lp->stats.tx_errors++; 1923 lp->a.write_csr(ioaddr, 0, 0x0004);
1868 if (netif_msg_tx_err(lp)) { 1924 lp->stats.tx_errors++;
1869 int i; 1925 if (netif_msg_tx_err(lp)) {
1870 printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", 1926 int i;
1871 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", 1927 printk(KERN_DEBUG
1872 lp->cur_rx); 1928 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
1873 for (i = 0 ; i < lp->rx_ring_size; i++) 1929 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
1874 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1930 lp->cur_rx);
1875 le32_to_cpu(lp->rx_ring[i].base), 1931 for (i = 0; i < lp->rx_ring_size; i++)
1876 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, 1932 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1877 le32_to_cpu(lp->rx_ring[i].msg_length), 1933 le32_to_cpu(lp->rx_ring[i].base),
1878 le16_to_cpu(lp->rx_ring[i].status)); 1934 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
1879 for (i = 0 ; i < lp->tx_ring_size; i++) 1935 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
1880 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1936 le16_to_cpu(lp->rx_ring[i].status));
1881 le32_to_cpu(lp->tx_ring[i].base), 1937 for (i = 0; i < lp->tx_ring_size; i++)
1882 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, 1938 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1883 le32_to_cpu(lp->tx_ring[i].misc), 1939 le32_to_cpu(lp->tx_ring[i].base),
1884 le16_to_cpu(lp->tx_ring[i].status)); 1940 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
1885 printk("\n"); 1941 le32_to_cpu(lp->tx_ring[i].misc),
1886 } 1942 le16_to_cpu(lp->tx_ring[i].status));
1887 pcnet32_restart(dev, 0x0042); 1943 printk("\n");
1888 1944 }
1889 dev->trans_start = jiffies; 1945 pcnet32_restart(dev, 0x0042);
1890 netif_wake_queue(dev); 1946
1891 1947 dev->trans_start = jiffies;
1892 spin_unlock_irqrestore(&lp->lock, flags); 1948 netif_wake_queue(dev);
1893}
1894 1949
1950 spin_unlock_irqrestore(&lp->lock, flags);
1951}
1895 1952
1896static int 1953static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1897pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1898{ 1954{
1899 struct pcnet32_private *lp = dev->priv; 1955 struct pcnet32_private *lp = dev->priv;
1900 unsigned long ioaddr = dev->base_addr; 1956 unsigned long ioaddr = dev->base_addr;
1901 u16 status; 1957 u16 status;
1902 int entry; 1958 int entry;
1903 unsigned long flags; 1959 unsigned long flags;
1904 1960
1905 spin_lock_irqsave(&lp->lock, flags); 1961 spin_lock_irqsave(&lp->lock, flags);
1906 1962
1907 if (netif_msg_tx_queued(lp)) { 1963 if (netif_msg_tx_queued(lp)) {
1908 printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", 1964 printk(KERN_DEBUG
1909 dev->name, lp->a.read_csr(ioaddr, 0)); 1965 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
1910 } 1966 dev->name, lp->a.read_csr(ioaddr, 0));
1967 }
1911 1968
1912 /* Default status -- will not enable Successful-TxDone 1969 /* Default status -- will not enable Successful-TxDone
1913 * interrupt when that option is available to us. 1970 * interrupt when that option is available to us.
1914 */ 1971 */
1915 status = 0x8300; 1972 status = 0x8300;
1916 1973
1917 /* Fill in a Tx ring entry */ 1974 /* Fill in a Tx ring entry */
1918 1975
1919 /* Mask to ring buffer boundary. */ 1976 /* Mask to ring buffer boundary. */
1920 entry = lp->cur_tx & lp->tx_mod_mask; 1977 entry = lp->cur_tx & lp->tx_mod_mask;
1921 1978
1922 /* Caution: the write order is important here, set the status 1979 /* Caution: the write order is important here, set the status
1923 * with the "ownership" bits last. */ 1980 * with the "ownership" bits last. */
1924 1981
1925 lp->tx_ring[entry].length = le16_to_cpu(-skb->len); 1982 lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
1926 1983
1927 lp->tx_ring[entry].misc = 0x00000000; 1984 lp->tx_ring[entry].misc = 0x00000000;
1928 1985
1929 lp->tx_skbuff[entry] = skb; 1986 lp->tx_skbuff[entry] = skb;
1930 lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, 1987 lp->tx_dma_addr[entry] =
1931 PCI_DMA_TODEVICE); 1988 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1932 lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); 1989 lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
1933 wmb(); /* Make sure owner changes after all others are visible */ 1990 wmb(); /* Make sure owner changes after all others are visible */
1934 lp->tx_ring[entry].status = le16_to_cpu(status); 1991 lp->tx_ring[entry].status = le16_to_cpu(status);
1935 1992
1936 lp->cur_tx++; 1993 lp->cur_tx++;
1937 lp->stats.tx_bytes += skb->len; 1994 lp->stats.tx_bytes += skb->len;
1938 1995
1939 /* Trigger an immediate send poll. */ 1996 /* Trigger an immediate send poll. */
1940 lp->a.write_csr (ioaddr, 0, 0x0048); 1997 lp->a.write_csr(ioaddr, 0, 0x0048);
1941 1998
1942 dev->trans_start = jiffies; 1999 dev->trans_start = jiffies;
1943 2000
1944 if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { 2001 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
1945 lp->tx_full = 1; 2002 lp->tx_full = 1;
1946 netif_stop_queue(dev); 2003 netif_stop_queue(dev);
1947 } 2004 }
1948 spin_unlock_irqrestore(&lp->lock, flags); 2005 spin_unlock_irqrestore(&lp->lock, flags);
1949 return 0; 2006 return 0;
1950} 2007}
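
Two conventions in this transmit path are easy to miss: the ring index comes from masking an ever-incrementing counter with a power-of-two mask (entry = lp->cur_tx & lp->tx_mod_mask), and the hardware takes the buffer length as a two's-complement negative (le16_to_cpu(-skb->len)). A small compilable sketch of both, with a hypothetical ring size:

#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 16                 /* must be a power of two */
#define TX_MOD_MASK  (TX_RING_SIZE - 1)

int main(void)
{
	unsigned int cur_tx = 4294967290u;  /* near UINT_MAX: still safe */
	unsigned int entry = cur_tx & TX_MOD_MASK;
	uint16_t hw_len = (uint16_t)(-1514); /* length field as -len */

	printf("entry = %u\n", entry);                  /* 10: wraps cleanly */
	printf("hw length field = 0x%04x\n", (unsigned)hw_len); /* 0xfa16 */
	return 0;
}
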
1951 2008
1952/* The PCNET32 interrupt handler. */ 2009/* The PCNET32 interrupt handler. */
1953static irqreturn_t 2010static irqreturn_t
1954pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) 2011pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1955{ 2012{
1956 struct net_device *dev = dev_id; 2013 struct net_device *dev = dev_id;
1957 struct pcnet32_private *lp; 2014 struct pcnet32_private *lp;
1958 unsigned long ioaddr; 2015 unsigned long ioaddr;
1959 u16 csr0,rap; 2016 u16 csr0, rap;
1960 int boguscnt = max_interrupt_work; 2017 int boguscnt = max_interrupt_work;
1961 int must_restart; 2018 int must_restart;
1962 2019
1963 if (!dev) { 2020 if (!dev) {
1964 if (pcnet32_debug & NETIF_MSG_INTR) 2021 if (pcnet32_debug & NETIF_MSG_INTR)
1965 printk (KERN_DEBUG "%s(): irq %d for unknown device\n", 2022 printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
1966 __FUNCTION__, irq); 2023 __FUNCTION__, irq);
1967 return IRQ_NONE; 2024 return IRQ_NONE;
1968 }
1969
1970 ioaddr = dev->base_addr;
1971 lp = dev->priv;
1972
1973 spin_lock(&lp->lock);
1974
1975 rap = lp->a.read_rap(ioaddr);
1976 while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
1977 if (csr0 == 0xffff) {
1978 break; /* PCMCIA remove happened */
1979 } 2025 }
1980 /* Acknowledge all of the current interrupt sources ASAP. */
1981 lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
1982 2026
1983 must_restart = 0; 2027 ioaddr = dev->base_addr;
2028 lp = dev->priv;
1984 2029
1985 if (netif_msg_intr(lp)) 2030 spin_lock(&lp->lock);
1986 printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", 2031
1987 dev->name, csr0, lp->a.read_csr (ioaddr, 0)); 2032 rap = lp->a.read_rap(ioaddr);
1988 2033 while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
1989 if (csr0 & 0x0400) /* Rx interrupt */ 2034 if (csr0 == 0xffff) {
1990 pcnet32_rx(dev); 2035 break; /* PCMCIA remove happened */
1991 2036 }
1992 if (csr0 & 0x0200) { /* Tx-done interrupt */ 2037 /* Acknowledge all of the current interrupt sources ASAP. */
1993 unsigned int dirty_tx = lp->dirty_tx; 2038 lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
1994 int delta; 2039
1995 2040 must_restart = 0;
1996 while (dirty_tx != lp->cur_tx) { 2041
1997 int entry = dirty_tx & lp->tx_mod_mask; 2042 if (netif_msg_intr(lp))
1998 int status = (short)le16_to_cpu(lp->tx_ring[entry].status); 2043 printk(KERN_DEBUG
1999 2044 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
2000 if (status < 0) 2045 dev->name, csr0, lp->a.read_csr(ioaddr, 0));
2001 break; /* It still hasn't been Txed */ 2046
2002 2047 if (csr0 & 0x0400) /* Rx interrupt */
2003 lp->tx_ring[entry].base = 0; 2048 pcnet32_rx(dev);
2004 2049
2005 if (status & 0x4000) { 2050 if (csr0 & 0x0200) { /* Tx-done interrupt */
2006 /* There was a major error, log it. */ 2051 unsigned int dirty_tx = lp->dirty_tx;
2007 int err_status = le32_to_cpu(lp->tx_ring[entry].misc); 2052 int delta;
2008 lp->stats.tx_errors++; 2053
2009 if (netif_msg_tx_err(lp)) 2054 while (dirty_tx != lp->cur_tx) {
2010 printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n", 2055 int entry = dirty_tx & lp->tx_mod_mask;
2011 dev->name, status, err_status); 2056 int status =
2012 if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; 2057 (short)le16_to_cpu(lp->tx_ring[entry].
2013 if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; 2058 status);
2014 if (err_status & 0x10000000) lp->stats.tx_window_errors++; 2059
2060 if (status < 0)
2061 break; /* It still hasn't been Txed */
2062
2063 lp->tx_ring[entry].base = 0;
2064
2065 if (status & 0x4000) {
2066 /* There was a major error, log it. */
2067 int err_status =
2068 le32_to_cpu(lp->tx_ring[entry].
2069 misc);
2070 lp->stats.tx_errors++;
2071 if (netif_msg_tx_err(lp))
2072 printk(KERN_ERR
2073 "%s: Tx error status=%04x err_status=%08x\n",
2074 dev->name, status,
2075 err_status);
2076 if (err_status & 0x04000000)
2077 lp->stats.tx_aborted_errors++;
2078 if (err_status & 0x08000000)
2079 lp->stats.tx_carrier_errors++;
2080 if (err_status & 0x10000000)
2081 lp->stats.tx_window_errors++;
2015#ifndef DO_DXSUFLO 2082#ifndef DO_DXSUFLO
2016 if (err_status & 0x40000000) { 2083 if (err_status & 0x40000000) {
2017 lp->stats.tx_fifo_errors++; 2084 lp->stats.tx_fifo_errors++;
2018 /* Ackk! On FIFO errors the Tx unit is turned off! */ 2085 /* Ackk! On FIFO errors the Tx unit is turned off! */
2019 /* Remove this verbosity later! */ 2086 /* Remove this verbosity later! */
2020 if (netif_msg_tx_err(lp)) 2087 if (netif_msg_tx_err(lp))
2021 printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", 2088 printk(KERN_ERR
2022 dev->name, csr0); 2089 "%s: Tx FIFO error! CSR0=%4.4x\n",
2023 must_restart = 1; 2090 dev->name, csr0);
2024 } 2091 must_restart = 1;
2092 }
2025#else 2093#else
2026 if (err_status & 0x40000000) { 2094 if (err_status & 0x40000000) {
2027 lp->stats.tx_fifo_errors++; 2095 lp->stats.tx_fifo_errors++;
2028 if (! lp->dxsuflo) { /* If controller doesn't recover ... */ 2096 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
2029 /* Ackk! On FIFO errors the Tx unit is turned off! */ 2097 /* Ackk! On FIFO errors the Tx unit is turned off! */
2030 /* Remove this verbosity later! */ 2098 /* Remove this verbosity later! */
2031 if (netif_msg_tx_err(lp)) 2099 if (netif_msg_tx_err
2032 printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", 2100 (lp))
2033 dev->name, csr0); 2101 printk(KERN_ERR
2034 must_restart = 1; 2102 "%s: Tx FIFO error! CSR0=%4.4x\n",
2035 } 2103 dev->
2036 } 2104 name,
2105 csr0);
2106 must_restart = 1;
2107 }
2108 }
2037#endif 2109#endif
2038 } else { 2110 } else {
2039 if (status & 0x1800) 2111 if (status & 0x1800)
2040 lp->stats.collisions++; 2112 lp->stats.collisions++;
2041 lp->stats.tx_packets++; 2113 lp->stats.tx_packets++;
2114 }
2115
2116 /* We must free the original skb */
2117 if (lp->tx_skbuff[entry]) {
2118 pci_unmap_single(lp->pci_dev,
2119 lp->tx_dma_addr[entry],
2120 lp->tx_skbuff[entry]->
2121 len, PCI_DMA_TODEVICE);
2122 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
2123 lp->tx_skbuff[entry] = NULL;
2124 lp->tx_dma_addr[entry] = 0;
2125 }
2126 dirty_tx++;
2127 }
2128
2129 delta =
2130 (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
2131 lp->tx_ring_size);
2132 if (delta > lp->tx_ring_size) {
2133 if (netif_msg_drv(lp))
2134 printk(KERN_ERR
2135 "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
2136 dev->name, dirty_tx, lp->cur_tx,
2137 lp->tx_full);
2138 dirty_tx += lp->tx_ring_size;
2139 delta -= lp->tx_ring_size;
2140 }
2141
2142 if (lp->tx_full &&
2143 netif_queue_stopped(dev) &&
2144 delta < lp->tx_ring_size - 2) {
2145 /* The ring is no longer full, clear tbusy. */
2146 lp->tx_full = 0;
2147 netif_wake_queue(dev);
2148 }
2149 lp->dirty_tx = dirty_tx;
2150 }
2151
2152 /* Log misc errors. */
2153 if (csr0 & 0x4000)
2154 lp->stats.tx_errors++; /* Tx babble. */
2155 if (csr0 & 0x1000) {
2156 /*
2157 * this happens when our receive ring is full. This shouldn't
2158 * be a problem as we will see normal rx interrupts for the frames
2159 * in the receive ring. But there are some PCI chipsets (I can
2160 * reproduce this on SP3G with the Intel Saturn chipset) which
2161 * sometimes have problems and will fill up the receive ring with
2162 * error descriptors. In this situation we don't get an rx
2163 * interrupt, but a missed frame interrupt sooner or later.
2164 * So we try to clean up our receive ring here.
2165 */
2166 pcnet32_rx(dev);
2167 lp->stats.rx_errors++; /* Missed a Rx frame. */
2168 }
2169 if (csr0 & 0x0800) {
2170 if (netif_msg_drv(lp))
2171 printk(KERN_ERR
2172 "%s: Bus master arbitration failure, status %4.4x.\n",
2173 dev->name, csr0);
2174 /* unlike for the lance, there is no restart needed */
2042 } 2175 }
2043 2176
2044 /* We must free the original skb */ 2177 if (must_restart) {
2045 if (lp->tx_skbuff[entry]) { 2178 /* reset the chip to clear the error condition, then restart */
2046 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], 2179 lp->a.reset(ioaddr);
2047 lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); 2180 lp->a.write_csr(ioaddr, 4, 0x0915);
2048 dev_kfree_skb_irq(lp->tx_skbuff[entry]); 2181 pcnet32_restart(dev, 0x0002);
2049 lp->tx_skbuff[entry] = NULL; 2182 netif_wake_queue(dev);
2050 lp->tx_dma_addr[entry] = 0;
2051 } 2183 }
2052 dirty_tx++; 2184 }
2053 } 2185
2054 2186 /* Set interrupt enable. */
2055 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); 2187 lp->a.write_csr(ioaddr, 0, 0x0040);
2056 if (delta > lp->tx_ring_size) { 2188 lp->a.write_rap(ioaddr, rap);
2057 if (netif_msg_drv(lp)) 2189
2058 printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", 2190 if (netif_msg_intr(lp))
2059 dev->name, dirty_tx, lp->cur_tx, lp->tx_full); 2191 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2060 dirty_tx += lp->tx_ring_size; 2192 dev->name, lp->a.read_csr(ioaddr, 0));
2061 delta -= lp->tx_ring_size; 2193
2062 } 2194 spin_unlock(&lp->lock);
2063 2195
2064 if (lp->tx_full && 2196 return IRQ_HANDLED;
2065 netif_queue_stopped(dev) &&
2066 delta < lp->tx_ring_size - 2) {
2067 /* The ring is no longer full, clear tbusy. */
2068 lp->tx_full = 0;
2069 netif_wake_queue (dev);
2070 }
2071 lp->dirty_tx = dirty_tx;
2072 }
2073
2074 /* Log misc errors. */
2075 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
2076 if (csr0 & 0x1000) {
2077 /*
2078 * this happens when our receive ring is full. This shouldn't
2079 * be a problem as we will see normal rx interrupts for the frames
2080 * in the receive ring. But there are some PCI chipsets (I can
2081 * reproduce this on SP3G with the Intel Saturn chipset) which
2082 * sometimes have problems and will fill up the receive ring with
2083 * error descriptors. In this situation we don't get an rx
2084 * interrupt, but a missed frame interrupt sooner or later.
2085 * So we try to clean up our receive ring here.
2086 */
2087 pcnet32_rx(dev);
2088 lp->stats.rx_errors++; /* Missed a Rx frame. */
2089 }
2090 if (csr0 & 0x0800) {
2091 if (netif_msg_drv(lp))
2092 printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
2093 dev->name, csr0);
2094 /* unlike for the lance, there is no restart needed */
2095 }
2096
2097 if (must_restart) {
2098 /* reset the chip to clear the error condition, then restart */
2099 lp->a.reset(ioaddr);
2100 lp->a.write_csr(ioaddr, 4, 0x0915);
2101 pcnet32_restart(dev, 0x0002);
2102 netif_wake_queue(dev);
2103 }
2104 }
2105
2106 /* Set interrupt enable. */
2107 lp->a.write_csr (ioaddr, 0, 0x0040);
2108 lp->a.write_rap (ioaddr,rap);
2109
2110 if (netif_msg_intr(lp))
2111 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2112 dev->name, lp->a.read_csr (ioaddr, 0));
2113
2114 spin_unlock(&lp->lock);
2115
2116 return IRQ_HANDLED;
2117} 2197}
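
In the Tx-done branch, delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size) counts outstanding descriptors with a mask of 2*ring_size - 1, so the unsigned subtraction stays correct across counter wraparound, and any delta above the ring size flags the out-of-sync condition logged above. A compilable illustration (ring size hypothetical):

#include <stdio.h>

#define RING_SIZE 16
#define MOD_MASK  (RING_SIZE - 1)

static unsigned int pending(unsigned int cur, unsigned int dirty)
{
	/* Mask of 2*RING_SIZE - 1, exactly as in the interrupt handler. */
	return (cur - dirty) & (MOD_MASK + RING_SIZE);
}

int main(void)
{
	/* Normal case: 5 packets queued but not yet reclaimed. */
	printf("%u\n", pending(105, 100));
	/* Wraparound of the unsigned counters still gives 5. */
	printf("%u\n", pending(2, 4294967293u));
	return 0;
}
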
2118 2198
2119static int 2199static int pcnet32_rx(struct net_device *dev)
2120pcnet32_rx(struct net_device *dev)
2121{ 2200{
2122 struct pcnet32_private *lp = dev->priv; 2201 struct pcnet32_private *lp = dev->priv;
2123 int entry = lp->cur_rx & lp->rx_mod_mask; 2202 int entry = lp->cur_rx & lp->rx_mod_mask;
2124 int boguscnt = lp->rx_ring_size / 2; 2203 int boguscnt = lp->rx_ring_size / 2;
2125 2204
2126 /* If we own the next entry, it's a new packet. Send it up. */ 2205 /* If we own the next entry, it's a new packet. Send it up. */
2127 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { 2206 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
2128 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; 2207 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
2129 2208
2130 if (status != 0x03) { /* There was an error. */ 2209 if (status != 0x03) { /* There was an error. */
2131 /* 2210 /*
2132 * There is a tricky error noted by John Murphy, 2211 * There is a tricky error noted by John Murphy,
2133 * <murf@perftech.com> to Russ Nelson: Even with full-sized 2212 * <murf@perftech.com> to Russ Nelson: Even with full-sized
2134 * buffers it's possible for a jabber packet to use two 2213 * buffers it's possible for a jabber packet to use two
2135 * buffers, with only the last correctly noting the error. 2214 * buffers, with only the last correctly noting the error.
2136 */ 2215 */
2137 if (status & 0x01) /* Only count a general error at the */ 2216 if (status & 0x01) /* Only count a general error at the */
2138 lp->stats.rx_errors++; /* end of a packet.*/ 2217 lp->stats.rx_errors++; /* end of a packet. */
2139 if (status & 0x20) lp->stats.rx_frame_errors++; 2218 if (status & 0x20)
2140 if (status & 0x10) lp->stats.rx_over_errors++; 2219 lp->stats.rx_frame_errors++;
2141 if (status & 0x08) lp->stats.rx_crc_errors++; 2220 if (status & 0x10)
2142 if (status & 0x04) lp->stats.rx_fifo_errors++; 2221 lp->stats.rx_over_errors++;
2143 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); 2222 if (status & 0x08)
2144 } else { 2223 lp->stats.rx_crc_errors++;
2145 /* Malloc up new buffer, compatible with net-2e. */ 2224 if (status & 0x04)
2146 short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4; 2225 lp->stats.rx_fifo_errors++;
2147 struct sk_buff *skb; 2226 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
2148
2149 /* Discard oversize frames. */
2150 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2151 if (netif_msg_drv(lp))
2152 printk(KERN_ERR "%s: Impossible packet size %d!\n",
2153 dev->name, pkt_len);
2154 lp->stats.rx_errors++;
2155 } else if (pkt_len < 60) {
2156 if (netif_msg_rx_err(lp))
2157 printk(KERN_ERR "%s: Runt packet!\n", dev->name);
2158 lp->stats.rx_errors++;
2159 } else {
2160 int rx_in_place = 0;
2161
2162 if (pkt_len > rx_copybreak) {
2163 struct sk_buff *newskb;
2164
2165 if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
2166 skb_reserve (newskb, 2);
2167 skb = lp->rx_skbuff[entry];
2168 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
2169 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
2170 skb_put (skb, pkt_len);
2171 lp->rx_skbuff[entry] = newskb;
2172 newskb->dev = dev;
2173 lp->rx_dma_addr[entry] =
2174 pci_map_single(lp->pci_dev, newskb->data,
2175 PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
2176 lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
2177 rx_in_place = 1;
2178 } else
2179 skb = NULL;
2180 } else { 2227 } else {
2181 skb = dev_alloc_skb(pkt_len+2); 2228 /* Malloc up new buffer, compatible with net-2e. */
2182 } 2229 short pkt_len =
2183 2230 (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
2184 if (skb == NULL) { 2231 - 4;
2185 int i; 2232 struct sk_buff *skb;
2186 if (netif_msg_drv(lp)) 2233
2187 printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", 2234 /* Discard oversize frames. */
2188 dev->name); 2235 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2189 for (i = 0; i < lp->rx_ring_size; i++) 2236 if (netif_msg_drv(lp))
2190 if ((short)le16_to_cpu(lp->rx_ring[(entry+i) 2237 printk(KERN_ERR
2191 & lp->rx_mod_mask].status) < 0) 2238 "%s: Impossible packet size %d!\n",
2192 break; 2239 dev->name, pkt_len);
2193 2240 lp->stats.rx_errors++;
2194 if (i > lp->rx_ring_size -2) { 2241 } else if (pkt_len < 60) {
2195 lp->stats.rx_dropped++; 2242 if (netif_msg_rx_err(lp))
2196 lp->rx_ring[entry].status |= le16_to_cpu(0x8000); 2243 printk(KERN_ERR "%s: Runt packet!\n",
2197 wmb(); /* Make sure adapter sees owner change */ 2244 dev->name);
2198 lp->cur_rx++; 2245 lp->stats.rx_errors++;
2199 } 2246 } else {
2200 break; 2247 int rx_in_place = 0;
2201 } 2248
2202 skb->dev = dev; 2249 if (pkt_len > rx_copybreak) {
2203 if (!rx_in_place) { 2250 struct sk_buff *newskb;
2204 skb_reserve(skb,2); /* 16 byte align */ 2251
2205 skb_put(skb,pkt_len); /* Make room */ 2252 if ((newskb =
2206 pci_dma_sync_single_for_cpu(lp->pci_dev, 2253 dev_alloc_skb(PKT_BUF_SZ))) {
2207 lp->rx_dma_addr[entry], 2254 skb_reserve(newskb, 2);
2208 PKT_BUF_SZ-2, 2255 skb = lp->rx_skbuff[entry];
2209 PCI_DMA_FROMDEVICE); 2256 pci_unmap_single(lp->pci_dev,
2210 eth_copy_and_sum(skb, 2257 lp->
2211 (unsigned char *)(lp->rx_skbuff[entry]->data), 2258 rx_dma_addr
2212 pkt_len,0); 2259 [entry],
2213 pci_dma_sync_single_for_device(lp->pci_dev, 2260 PKT_BUF_SZ - 2,
2214 lp->rx_dma_addr[entry], 2261 PCI_DMA_FROMDEVICE);
2215 PKT_BUF_SZ-2, 2262 skb_put(skb, pkt_len);
2216 PCI_DMA_FROMDEVICE); 2263 lp->rx_skbuff[entry] = newskb;
2264 newskb->dev = dev;
2265 lp->rx_dma_addr[entry] =
2266 pci_map_single(lp->pci_dev,
2267 newskb->data,
2268 PKT_BUF_SZ -
2269 2,
2270 PCI_DMA_FROMDEVICE);
2271 lp->rx_ring[entry].base =
2272 le32_to_cpu(lp->
2273 rx_dma_addr
2274 [entry]);
2275 rx_in_place = 1;
2276 } else
2277 skb = NULL;
2278 } else {
2279 skb = dev_alloc_skb(pkt_len + 2);
2280 }
2281
2282 if (skb == NULL) {
2283 int i;
2284 if (netif_msg_drv(lp))
2285 printk(KERN_ERR
2286 "%s: Memory squeeze, deferring packet.\n",
2287 dev->name);
2288 for (i = 0; i < lp->rx_ring_size; i++)
2289 if ((short)
2290 le16_to_cpu(lp->
2291 rx_ring[(entry +
2292 i)
2293 & lp->
2294 rx_mod_mask].
2295 status) < 0)
2296 break;
2297
2298 if (i > lp->rx_ring_size - 2) {
2299 lp->stats.rx_dropped++;
2300 lp->rx_ring[entry].status |=
2301 le16_to_cpu(0x8000);
2302 wmb(); /* Make sure adapter sees owner change */
2303 lp->cur_rx++;
2304 }
2305 break;
2306 }
2307 skb->dev = dev;
2308 if (!rx_in_place) {
2309 skb_reserve(skb, 2); /* 16 byte align */
2310 skb_put(skb, pkt_len); /* Make room */
2311 pci_dma_sync_single_for_cpu(lp->pci_dev,
2312 lp->
2313 rx_dma_addr
2314 [entry],
2315 PKT_BUF_SZ -
2316 2,
2317 PCI_DMA_FROMDEVICE);
2318 eth_copy_and_sum(skb,
2319 (unsigned char *)(lp->
2320 rx_skbuff
2321 [entry]->
2322 data),
2323 pkt_len, 0);
2324 pci_dma_sync_single_for_device(lp->
2325 pci_dev,
2326 lp->
2327 rx_dma_addr
2328 [entry],
2329 PKT_BUF_SZ
2330 - 2,
2331 PCI_DMA_FROMDEVICE);
2332 }
2333 lp->stats.rx_bytes += skb->len;
2334 skb->protocol = eth_type_trans(skb, dev);
2335 netif_rx(skb);
2336 dev->last_rx = jiffies;
2337 lp->stats.rx_packets++;
2338 }
2217 } 2339 }
2218 lp->stats.rx_bytes += skb->len; 2340 /*
2219 skb->protocol=eth_type_trans(skb,dev); 2341 * The docs say that the buffer length isn't touched, but Andrew Boyd
2220 netif_rx(skb); 2342 * of QNX reports that some revs of the 79C965 clear it.
2221 dev->last_rx = jiffies; 2343 */
2222 lp->stats.rx_packets++; 2344 lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
2223 } 2345 wmb(); /* Make sure owner changes after all others are visible */
2346 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2347 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2348 if (--boguscnt <= 0)
2349 break; /* don't stay in loop forever */
2224 } 2350 }
2225 /* 2351
2226 * The docs say that the buffer length isn't touched, but Andrew Boyd 2352 return 0;
2227 * of QNX reports that some revs of the 79C965 clear it.
2228 */
2229 lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
2230 wmb(); /* Make sure owner changes after all others are visible */
2231 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2232 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2233 if (--boguscnt <= 0) break; /* don't stay in loop forever */
2234 }
2235
2236 return 0;
2237} 2353}
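
The receive logic above is the classic rx_copybreak trade-off: packets no larger than the threshold are copied into a fresh, right-sized skb so the mapped ring buffer can be reused in place, while larger packets are passed up whole and a newly allocated buffer takes their ring slot. A schematic user-space version of that decision (buffer management greatly simplified, error handling elided):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 200   /* copy packets this size or smaller */

/* Returns a buffer owning the packet; *ring_buf is replaced only
 * when the original buffer is given away (the "flip" case). */
static char *receive(char **ring_buf, int pkt_len)
{
	if (pkt_len <= COPYBREAK) {
		char *copy = malloc(pkt_len);   /* small: copy out */
		memcpy(copy, *ring_buf, pkt_len);
		return copy;                    /* ring buffer reused */
	}
	char *full = *ring_buf;                 /* large: flip buffers */
	*ring_buf = malloc(4096);               /* fresh ring buffer */
	return full;
}

int main(void)
{
	char *ring_buf = malloc(4096);
	char *pkt;

	strcpy(ring_buf, "hello");
	pkt = receive(&ring_buf, 6);            /* small: copy path */
	printf("%s\n", pkt);
	free(pkt);
	free(ring_buf);
	return 0;
}
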
2238 2354
2239static int 2355static int pcnet32_close(struct net_device *dev)
2240pcnet32_close(struct net_device *dev)
2241{ 2356{
2242 unsigned long ioaddr = dev->base_addr; 2357 unsigned long ioaddr = dev->base_addr;
2243 struct pcnet32_private *lp = dev->priv; 2358 struct pcnet32_private *lp = dev->priv;
2244 int i; 2359 int i;
2245 unsigned long flags; 2360 unsigned long flags;
2246 2361
2247 del_timer_sync(&lp->watchdog_timer); 2362 del_timer_sync(&lp->watchdog_timer);
2248 2363
2249 netif_stop_queue(dev); 2364 netif_stop_queue(dev);
2250 2365
2251 spin_lock_irqsave(&lp->lock, flags); 2366 spin_lock_irqsave(&lp->lock, flags);
2252 2367
2253 lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); 2368 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2254 2369
2255 if (netif_msg_ifdown(lp)) 2370 if (netif_msg_ifdown(lp))
2256 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 2371 printk(KERN_DEBUG
2257 dev->name, lp->a.read_csr (ioaddr, 0)); 2372 "%s: Shutting down ethercard, status was %2.2x.\n",
2373 dev->name, lp->a.read_csr(ioaddr, 0));
2258 2374
2259 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ 2375 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2260 lp->a.write_csr (ioaddr, 0, 0x0004); 2376 lp->a.write_csr(ioaddr, 0, 0x0004);
2261 2377
2262 /* 2378 /*
2263 * Switch back to 16bit mode to avoid problems with dumb 2379 * Switch back to 16bit mode to avoid problems with dumb
2264 * DOS packet driver after a warm reboot 2380 * DOS packet driver after a warm reboot
2265 */ 2381 */
2266 lp->a.write_bcr (ioaddr, 20, 4); 2382 lp->a.write_bcr(ioaddr, 20, 4);
2267 2383
2268 spin_unlock_irqrestore(&lp->lock, flags); 2384 spin_unlock_irqrestore(&lp->lock, flags);
2269 2385
2270 free_irq(dev->irq, dev); 2386 free_irq(dev->irq, dev);
2271 2387
2272 spin_lock_irqsave(&lp->lock, flags); 2388 spin_lock_irqsave(&lp->lock, flags);
2273 2389
2274 /* free all allocated skbuffs */ 2390 /* free all allocated skbuffs */
2275 for (i = 0; i < lp->rx_ring_size; i++) { 2391 for (i = 0; i < lp->rx_ring_size; i++) {
2276 lp->rx_ring[i].status = 0; 2392 lp->rx_ring[i].status = 0;
2277 wmb(); /* Make sure adapter sees owner change */ 2393 wmb(); /* Make sure adapter sees owner change */
2278 if (lp->rx_skbuff[i]) { 2394 if (lp->rx_skbuff[i]) {
2279 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, 2395 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
2280 PCI_DMA_FROMDEVICE); 2396 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2281 dev_kfree_skb(lp->rx_skbuff[i]); 2397 dev_kfree_skb(lp->rx_skbuff[i]);
2398 }
2399 lp->rx_skbuff[i] = NULL;
2400 lp->rx_dma_addr[i] = 0;
2282 } 2401 }
2283 lp->rx_skbuff[i] = NULL;
2284 lp->rx_dma_addr[i] = 0;
2285 }
2286 2402
2287 for (i = 0; i < lp->tx_ring_size; i++) { 2403 for (i = 0; i < lp->tx_ring_size; i++) {
2288 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 2404 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2289 wmb(); /* Make sure adapter sees owner change */ 2405 wmb(); /* Make sure adapter sees owner change */
2290 if (lp->tx_skbuff[i]) { 2406 if (lp->tx_skbuff[i]) {
2291 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], 2407 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2292 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); 2408 lp->tx_skbuff[i]->len,
2293 dev_kfree_skb(lp->tx_skbuff[i]); 2409 PCI_DMA_TODEVICE);
2410 dev_kfree_skb(lp->tx_skbuff[i]);
2411 }
2412 lp->tx_skbuff[i] = NULL;
2413 lp->tx_dma_addr[i] = 0;
2294 } 2414 }
2295 lp->tx_skbuff[i] = NULL;
2296 lp->tx_dma_addr[i] = 0;
2297 }
2298 2415
2299 spin_unlock_irqrestore(&lp->lock, flags); 2416 spin_unlock_irqrestore(&lp->lock, flags);
2300 2417
2301 return 0; 2418 return 0;
2302} 2419}
2303 2420
2304static struct net_device_stats * 2421static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
2305pcnet32_get_stats(struct net_device *dev)
2306{ 2422{
2307 struct pcnet32_private *lp = dev->priv; 2423 struct pcnet32_private *lp = dev->priv;
2308 unsigned long ioaddr = dev->base_addr; 2424 unsigned long ioaddr = dev->base_addr;
2309 u16 saved_addr; 2425 u16 saved_addr;
2310 unsigned long flags; 2426 unsigned long flags;
2311 2427
2312 spin_lock_irqsave(&lp->lock, flags); 2428 spin_lock_irqsave(&lp->lock, flags);
2313 saved_addr = lp->a.read_rap(ioaddr); 2429 saved_addr = lp->a.read_rap(ioaddr);
2314 lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); 2430 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2315 lp->a.write_rap(ioaddr, saved_addr); 2431 lp->a.write_rap(ioaddr, saved_addr);
2316 spin_unlock_irqrestore(&lp->lock, flags); 2432 spin_unlock_irqrestore(&lp->lock, flags);
2317 2433
2318 return &lp->stats; 2434 return &lp->stats;
2319} 2435}
2320 2436
2321/* taken from the sunlance driver, which it took from the depca driver */ 2437/* taken from the sunlance driver, which it took from the depca driver */
2322static void pcnet32_load_multicast (struct net_device *dev) 2438static void pcnet32_load_multicast(struct net_device *dev)
2323{ 2439{
2324 struct pcnet32_private *lp = dev->priv; 2440 struct pcnet32_private *lp = dev->priv;
2325 volatile struct pcnet32_init_block *ib = &lp->init_block; 2441 volatile struct pcnet32_init_block *ib = &lp->init_block;
2326 volatile u16 *mcast_table = (u16 *)&ib->filter; 2442 volatile u16 *mcast_table = (u16 *) & ib->filter;
2327 struct dev_mc_list *dmi=dev->mc_list; 2443 struct dev_mc_list *dmi = dev->mc_list;
2328 char *addrs; 2444 char *addrs;
2329 int i; 2445 int i;
2330 u32 crc; 2446 u32 crc;
2331 2447
2332 /* set all multicast bits */ 2448 /* set all multicast bits */
2333 if (dev->flags & IFF_ALLMULTI) { 2449 if (dev->flags & IFF_ALLMULTI) {
2334 ib->filter[0] = 0xffffffff; 2450 ib->filter[0] = 0xffffffff;
2335 ib->filter[1] = 0xffffffff; 2451 ib->filter[1] = 0xffffffff;
2452 return;
2453 }
2454 /* clear the multicast filter */
2455 ib->filter[0] = 0;
2456 ib->filter[1] = 0;
2457
2458 /* Add addresses */
2459 for (i = 0; i < dev->mc_count; i++) {
2460 addrs = dmi->dmi_addr;
2461 dmi = dmi->next;
2462
2463 /* multicast address? */
2464 if (!(*addrs & 1))
2465 continue;
2466
2467 crc = ether_crc_le(6, addrs);
2468 crc = crc >> 26;
2469 mcast_table[crc >> 4] =
2470 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
2471 (1 << (crc & 0xf)));
2472 }
2336 return; 2473 return;
2337 }
2338 /* clear the multicast filter */
2339 ib->filter[0] = 0;
2340 ib->filter[1] = 0;
2341
2342 /* Add addresses */
2343 for (i = 0; i < dev->mc_count; i++) {
2344 addrs = dmi->dmi_addr;
2345 dmi = dmi->next;
2346
2347 /* multicast address? */
2348 if (!(*addrs & 1))
2349 continue;
2350
2351 crc = ether_crc_le(6, addrs);
2352 crc = crc >> 26;
2353 mcast_table [crc >> 4] = le16_to_cpu(
2354 le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf)));
2355 }
2356 return;
2357} 2474}
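
pcnet32_load_multicast() folds each address into one bit of a 64-bit hardware filter: ether_crc_le() runs over the 6-byte address, the top 6 bits of the CRC pick the filter bit, with bits 5..4 selecting one of four 16-bit words and bits 3..0 the bit within that word. A self-contained sketch, assuming the usual reflected CRC-32 polynomial (0xedb88320) behind ether_crc_le():

#include <stdint.h>
#include <stdio.h>

/* Little-endian CRC-32 over the MAC address, in the spirit of the
 * kernel's ether_crc_le(): init 0xffffffff, no final inversion. */
static uint32_t crc32_le(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t filter[4] = { 0 };
	uint32_t idx = crc32_le(mac, 6) >> 26;   /* top 6 bits: 0..63 */

	filter[idx >> 4] |= (uint16_t)(1u << (idx & 0xf));
	printf("bit %u -> word %u, bit %u\n",
	       (unsigned)idx, (unsigned)(idx >> 4), (unsigned)(idx & 0xf));
	return 0;
}
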
2358 2475
2359
2360/* 2476/*
2361 * Set or clear the multicast filter for this adaptor. 2477 * Set or clear the multicast filter for this adaptor.
2362 */ 2478 */
2363static void pcnet32_set_multicast_list(struct net_device *dev) 2479static void pcnet32_set_multicast_list(struct net_device *dev)
2364{ 2480{
2365 unsigned long ioaddr = dev->base_addr, flags; 2481 unsigned long ioaddr = dev->base_addr, flags;
2366 struct pcnet32_private *lp = dev->priv; 2482 struct pcnet32_private *lp = dev->priv;
2367 2483
2368 spin_lock_irqsave(&lp->lock, flags); 2484 spin_lock_irqsave(&lp->lock, flags);
2369 if (dev->flags&IFF_PROMISC) { 2485 if (dev->flags & IFF_PROMISC) {
2370 /* Log any net taps. */ 2486 /* Log any net taps. */
2371 if (netif_msg_hw(lp)) 2487 if (netif_msg_hw(lp))
2372 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); 2488 printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2373 lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); 2489 dev->name);
2374 } else { 2490 lp->init_block.mode =
2375 lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); 2491 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2376 pcnet32_load_multicast (dev); 2492 7);
2377 } 2493 } else {
2378 2494 lp->init_block.mode =
2379 lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ 2495 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
2380 pcnet32_restart(dev, 0x0042); /* Resume normal operation */ 2496 pcnet32_load_multicast(dev);
2381 netif_wake_queue(dev); 2497 }
2382 2498
2383 spin_unlock_irqrestore(&lp->lock, flags); 2499 lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
2500 pcnet32_restart(dev, 0x0042); /* Resume normal operation */
2501 netif_wake_queue(dev);
2502
2503 spin_unlock_irqrestore(&lp->lock, flags);
2384} 2504}
2385 2505
2386/* This routine assumes that the lp->lock is held */ 2506/* This routine assumes that the lp->lock is held */
2387static int mdio_read(struct net_device *dev, int phy_id, int reg_num) 2507static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2388{ 2508{
2389 struct pcnet32_private *lp = dev->priv; 2509 struct pcnet32_private *lp = dev->priv;
2390 unsigned long ioaddr = dev->base_addr; 2510 unsigned long ioaddr = dev->base_addr;
2391 u16 val_out; 2511 u16 val_out;
2392 2512
2393 if (!lp->mii) 2513 if (!lp->mii)
2394 return 0; 2514 return 0;
2395 2515
2396 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); 2516 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2397 val_out = lp->a.read_bcr(ioaddr, 34); 2517 val_out = lp->a.read_bcr(ioaddr, 34);
2398 2518
2399 return val_out; 2519 return val_out;
2400} 2520}
2401 2521
2402/* This routine assumes that the lp->lock is held */ 2522/* This routine assumes that the lp->lock is held */
2403static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) 2523static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2404{ 2524{
2405 struct pcnet32_private *lp = dev->priv; 2525 struct pcnet32_private *lp = dev->priv;
2406 unsigned long ioaddr = dev->base_addr; 2526 unsigned long ioaddr = dev->base_addr;
2407 2527
2408 if (!lp->mii) 2528 if (!lp->mii)
2409 return; 2529 return;
2410 2530
2411 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); 2531 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2412 lp->a.write_bcr(ioaddr, 34, val); 2532 lp->a.write_bcr(ioaddr, 34, val);
2413} 2533}
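
mdio_read() and mdio_write() use the familiar indirect-register idiom: BCR33 acts as the address port (PHY id in bits 9..5, register number in bits 4..0) and BCR34 as the data port. A toy model of that address/data pairing, with the register file faked in arrays:

#include <stdint.h>
#include <stdio.h>

static uint16_t bcr[64];           /* fake BCR register file */
static uint16_t phy_regs[32][32];  /* [phy_id][reg_num]      */

static void write_bcr(int n, uint16_t v) { bcr[n] = v; }

static uint16_t read_bcr(int n)
{
	if (n == 34)  /* data port: indirect read through BCR33 */
		return phy_regs[(bcr[33] >> 5) & 0x1f][bcr[33] & 0x1f];
	return bcr[n];
}

static uint16_t mdio_read(int phy_id, int reg_num)
{
	/* Select the target, then read through the data port. */
	write_bcr(33, (uint16_t)(((phy_id & 0x1f) << 5) | (reg_num & 0x1f)));
	return read_bcr(34);
}

int main(void)
{
	phy_regs[1][2] = 0xbeef;       /* pretend PHY 1, register 2 */
	printf("0x%04x\n", mdio_read(1, 2));
	return 0;
}
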
2414 2534
2415static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2535static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2416{ 2536{
2417 struct pcnet32_private *lp = dev->priv; 2537 struct pcnet32_private *lp = dev->priv;
2418 int rc; 2538 int rc;
2419 unsigned long flags; 2539 unsigned long flags;
2540
2541 /* SIOC[GS]MIIxxx ioctls */
2542 if (lp->mii) {
2543 spin_lock_irqsave(&lp->lock, flags);
2544 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2545 spin_unlock_irqrestore(&lp->lock, flags);
2546 } else {
2547 rc = -EOPNOTSUPP;
2548 }
2549
2550 return rc;
2551}
2552
2553static int pcnet32_check_otherphy(struct net_device *dev)
2554{
2555 struct pcnet32_private *lp = dev->priv;
2556 struct mii_if_info mii = lp->mii_if;
2557 u16 bmcr;
2558 int i;
2420 2559
2421 /* SIOC[GS]MIIxxx ioctls */ 2560 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2422 if (lp->mii) { 2561 if (i == lp->mii_if.phy_id)
2423 spin_lock_irqsave(&lp->lock, flags); 2562 continue; /* skip active phy */
2424 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); 2563 if (lp->phymask & (1 << i)) {
2425 spin_unlock_irqrestore(&lp->lock, flags); 2564 mii.phy_id = i;
2426 } else { 2565 if (mii_link_ok(&mii)) {
2427 rc = -EOPNOTSUPP; 2566 /* found PHY with active link */
2428 } 2567 if (netif_msg_link(lp))
2568 printk(KERN_INFO
2569 "%s: Using PHY number %d.\n",
2570 dev->name, i);
2571
2572 /* isolate inactive phy */
2573 bmcr =
2574 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2575 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2576 bmcr | BMCR_ISOLATE);
2577
2578 /* de-isolate new phy */
2579 bmcr = mdio_read(dev, i, MII_BMCR);
2580 mdio_write(dev, i, MII_BMCR,
2581 bmcr & ~BMCR_ISOLATE);
2582
2583 /* set new phy address */
2584 lp->mii_if.phy_id = i;
2585 return 1;
2586 }
2587 }
2588 }
2589 return 0;
2590}
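
The failover scan above walks lp->phymask, skips the active PHY, and adopts the first alternative whose link is up, isolating the old one via BMCR_ISOLATE. A reduced sketch of just the scan (link state stubbed, constants hypothetical):

#include <stdio.h>

#define MAX_PHYS 32

/* Stubbed link test; pretend only PHY 5 has a live link. */
static int link_ok(int phy) { return phy == 5; }

/* Scan a presence bitmask for an alternative to active_id,
 * mirroring the loop in pcnet32_check_otherphy(). */
static int find_other_phy(unsigned int phymask, int active_id)
{
	for (int i = 0; i < MAX_PHYS; i++) {
		if (i == active_id)
			continue;       /* skip the active phy */
		if ((phymask & (1u << i)) && link_ok(i))
			return i;       /* found a live alternative */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", find_other_phy((1u << 0) | (1u << 5), 0)); /* 5 */
	return 0;
}
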
2591
2592/*
2593 * Show the status of the media. Similar to mii_check_media, but it
2594 * correctly shows the link speed for all (tested) pcnet32 variants.
2595 * Devices with no mii just report link state without speed.
2596 *
2597 * Caller is assumed to hold and release the lp->lock.
2598 */
2429 2599
2430 return rc; 2600static void pcnet32_check_media(struct net_device *dev, int verbose)
2601{
2602 struct pcnet32_private *lp = dev->priv;
2603 int curr_link;
2604 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2605 u32 bcr9;
2606
2607 if (lp->mii) {
2608 curr_link = mii_link_ok(&lp->mii_if);
2609 } else {
2610 ulong ioaddr = dev->base_addr; /* card base I/O address */
2611 curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
2612 }
2613 if (!curr_link) {
2614 if (prev_link || verbose) {
2615 netif_carrier_off(dev);
2616 if (netif_msg_link(lp))
2617 printk(KERN_INFO "%s: link down\n", dev->name);
2618 }
2619 if (lp->phycount > 1) {
2620 curr_link = pcnet32_check_otherphy(dev);
2621 prev_link = 0;
2622 }
2623 } else if (verbose || !prev_link) {
2624 netif_carrier_on(dev);
2625 if (lp->mii) {
2626 if (netif_msg_link(lp)) {
2627 struct ethtool_cmd ecmd;
2628 mii_ethtool_gset(&lp->mii_if, &ecmd);
2629 printk(KERN_INFO
2630 "%s: link up, %sMbps, %s-duplex\n",
2631 dev->name,
2632 (ecmd.speed == SPEED_100) ? "100" : "10",
2633 (ecmd.duplex ==
2634 DUPLEX_FULL) ? "full" : "half");
2635 }
2636 bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2637 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2638 if (lp->mii_if.full_duplex)
2639 bcr9 |= (1 << 0);
2640 else
2641 bcr9 &= ~(1 << 0);
2642 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2643 }
2644 } else {
2645 if (netif_msg_link(lp))
2646 printk(KERN_INFO "%s: link up\n", dev->name);
2647 }
2648 }
2431} 2649}
2432 2650
2651/*
2652 * Check for loss of link and link establishment.
2653 * Cannot use mii_check_media because it does nothing if mode is forced.
2654 */
2655
2433static void pcnet32_watchdog(struct net_device *dev) 2656static void pcnet32_watchdog(struct net_device *dev)
2434{ 2657{
2435 struct pcnet32_private *lp = dev->priv; 2658 struct pcnet32_private *lp = dev->priv;
2436 unsigned long flags; 2659 unsigned long flags;
2437 2660
2438 /* Print the link status if it has changed */ 2661 /* Print the link status if it has changed */
2439 if (lp->mii) {
2440 spin_lock_irqsave(&lp->lock, flags); 2662 spin_lock_irqsave(&lp->lock, flags);
2441 mii_check_media (&lp->mii_if, netif_msg_link(lp), 0); 2663 pcnet32_check_media(dev, 0);
2442 spin_unlock_irqrestore(&lp->lock, flags); 2664 spin_unlock_irqrestore(&lp->lock, flags);
2443 }
2444 2665
2445 mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 2666 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2446} 2667}
2447 2668
2448static void __devexit pcnet32_remove_one(struct pci_dev *pdev) 2669static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2449{ 2670{
2450 struct net_device *dev = pci_get_drvdata(pdev); 2671 struct net_device *dev = pci_get_drvdata(pdev);
2451 2672
2452 if (dev) { 2673 if (dev) {
2453 struct pcnet32_private *lp = dev->priv; 2674 struct pcnet32_private *lp = dev->priv;
2454 2675
2455 unregister_netdev(dev); 2676 unregister_netdev(dev);
2456 pcnet32_free_ring(dev); 2677 pcnet32_free_ring(dev);
2457 release_region(dev->base_addr, PCNET32_TOTAL_SIZE); 2678 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2458 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2679 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2459 free_netdev(dev); 2680 free_netdev(dev);
2460 pci_disable_device(pdev); 2681 pci_disable_device(pdev);
2461 pci_set_drvdata(pdev, NULL); 2682 pci_set_drvdata(pdev, NULL);
2462 } 2683 }
2463} 2684}
2464 2685
2465static struct pci_driver pcnet32_driver = { 2686static struct pci_driver pcnet32_driver = {
2466 .name = DRV_NAME, 2687 .name = DRV_NAME,
2467 .probe = pcnet32_probe_pci, 2688 .probe = pcnet32_probe_pci,
2468 .remove = __devexit_p(pcnet32_remove_one), 2689 .remove = __devexit_p(pcnet32_remove_one),
2469 .id_table = pcnet32_pci_tbl, 2690 .id_table = pcnet32_pci_tbl,
2470}; 2691};
2471 2692
2472/* An additional parameter that may be passed in... */ 2693/* An additional parameter that may be passed in... */
@@ -2477,9 +2698,11 @@ static int pcnet32_have_pci;
2477module_param(debug, int, 0); 2698module_param(debug, int, 0);
2478MODULE_PARM_DESC(debug, DRV_NAME " debug level"); 2699MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2479module_param(max_interrupt_work, int, 0); 2700module_param(max_interrupt_work, int, 0);
2480MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); 2701MODULE_PARM_DESC(max_interrupt_work,
2702 DRV_NAME " maximum events handled per interrupt");
2481module_param(rx_copybreak, int, 0); 2703module_param(rx_copybreak, int, 0);
2482MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); 2704MODULE_PARM_DESC(rx_copybreak,
2705 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
2483module_param(tx_start_pt, int, 0); 2706module_param(tx_start_pt, int, 0);
2484MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); 2707MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2485module_param(pcnet32vlb, int, 0); 2708module_param(pcnet32vlb, int, 0);
@@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0);
2490MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); 2713MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2491/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ 2714/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2492module_param_array(homepna, int, NULL, 0); 2715module_param_array(homepna, int, NULL, 0);
2493MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)"); 2716MODULE_PARM_DESC(homepna,
2717 DRV_NAME
2718 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
2494 2719
2495MODULE_AUTHOR("Thomas Bogendoerfer"); 2720MODULE_AUTHOR("Thomas Bogendoerfer");
2496MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); 2721MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
@@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL");
2500 2725
2501static int __init pcnet32_init_module(void) 2726static int __init pcnet32_init_module(void)
2502{ 2727{
2503 printk(KERN_INFO "%s", version); 2728 printk(KERN_INFO "%s", version);
2504 2729
2505 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); 2730 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
2506 2731
2507 if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) 2732 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
2508 tx_start = tx_start_pt; 2733 tx_start = tx_start_pt;
2509 2734
2510 /* find the PCI devices */ 2735 /* find the PCI devices */
2511 if (!pci_module_init(&pcnet32_driver)) 2736 if (!pci_module_init(&pcnet32_driver))
2512 pcnet32_have_pci = 1; 2737 pcnet32_have_pci = 1;
2513 2738
2514 /* should we find any remaining VLbus devices ? */ 2739 /* should we find any remaining VLbus devices ? */
2515 if (pcnet32vlb) 2740 if (pcnet32vlb)
2516 pcnet32_probe_vlbus(); 2741 pcnet32_probe_vlbus();
2517 2742
2518 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) 2743 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2519 printk(KERN_INFO PFX "%d cards_found.\n", cards_found); 2744 printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
2520 2745
2521 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; 2746 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
2522} 2747}
2523 2748
2524static void __exit pcnet32_cleanup_module(void) 2749static void __exit pcnet32_cleanup_module(void)
2525{ 2750{
2526 struct net_device *next_dev; 2751 struct net_device *next_dev;
2527 2752
2528 while (pcnet32_dev) { 2753 while (pcnet32_dev) {
2529 struct pcnet32_private *lp = pcnet32_dev->priv; 2754 struct pcnet32_private *lp = pcnet32_dev->priv;
2530 next_dev = lp->next; 2755 next_dev = lp->next;
2531 unregister_netdev(pcnet32_dev); 2756 unregister_netdev(pcnet32_dev);
2532 pcnet32_free_ring(pcnet32_dev); 2757 pcnet32_free_ring(pcnet32_dev);
2533 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); 2758 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2534 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2759 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2535 free_netdev(pcnet32_dev); 2760 free_netdev(pcnet32_dev);
2536 pcnet32_dev = next_dev; 2761 pcnet32_dev = next_dev;
2537 } 2762 }
2538 2763
2539 if (pcnet32_have_pci) 2764 if (pcnet32_have_pci)
2540 pci_unregister_driver(&pcnet32_driver); 2765 pci_unregister_driver(&pcnet32_driver);
2541} 2766}
2542 2767
2543module_init(pcnet32_init_module); 2768module_init(pcnet32_init_module);
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a4b2b6975d6c..0784f558ca9a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc)
549static void enable_formac(struct s_smc *smc) 549static void enable_formac(struct s_smc *smc)
550{ 550{
551 /* set formac IMSK : 0 enables irq */ 551 /* set formac IMSK : 0 enables irq */
552 outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ; 552 outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
553 outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ; 553 outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
554 outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ; 554 outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
555 outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ; 555 outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
556 outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ; 556 outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
557 outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ; 557 outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
558} 558}
559 559
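
These casts are about C's integer promotions: applying ~ to an unsigned short first promotes the operand to int, so the result gains 16 extra set bits and the 16-bit register write silently truncates (or draws a compiler warning). The added (unsigned short) casts make that truncation explicit. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned short mask = 0x00ff;

	/* ~mask promotes to int first, giving 0xffffff00, not 0xff00;
	 * the cast truncates back to the 16 bits the register takes. */
	printf("~mask         = 0x%x\n", (unsigned)~mask);
	printf("(ushort)~mask = 0x%x\n", (unsigned)(unsigned short)~mask);
	return 0;
}
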
560#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */ 560#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce48..4eda81d41b10 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,7 +44,7 @@
44#include "skge.h" 44#include "skge.h"
45 45
46#define DRV_NAME "skge" 46#define DRV_NAME "skge"
47#define DRV_VERSION "1.3" 47#define DRV_VERSION "1.4"
48#define PFX DRV_NAME " " 48#define PFX DRV_NAME " "
49 49
50#define DEFAULT_TX_RING_SIZE 128 50#define DEFAULT_TX_RING_SIZE 128
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
104static const int rxqaddr[] = { Q_R1, Q_R2 }; 104static const int rxqaddr[] = { Q_R1, Q_R2 };
105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
107static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
108 107
109static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
110{ 109{
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = {
728 * Allocate ring elements and chain them together 727 * Allocate ring elements and chain them together
729 * One-to-one association of board descriptors with ring elements 728 * One-to-one association of board descriptors with ring elements
730 */ 729 */
731static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) 730static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
732{ 731{
733 struct skge_tx_desc *d; 732 struct skge_tx_desc *d;
734 struct skge_element *e; 733 struct skge_element *e;
735 int i; 734 int i;
736 735
737 ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL); 736 ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
738 if (!ring->start) 737 if (!ring->start)
739 return -ENOMEM; 738 return -ENOMEM;
740 739
741 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 740 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
742 e->desc = d; 741 e->desc = d;
743 e->skb = NULL;
744 if (i == ring->count - 1) { 742 if (i == ring->count - 1) {
745 e->next = ring->start; 743 e->next = ring->start;
746 d->next_offset = base; 744 d->next_offset = base;
@@ -2169,27 +2167,31 @@ static int skge_up(struct net_device *dev)
2169 if (!skge->mem) 2167 if (!skge->mem)
2170 return -ENOMEM; 2168 return -ENOMEM;
2171 2169
2170 BUG_ON(skge->dma & 7);
2171
2172 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2173 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
2174 err = -EINVAL;
2175 goto free_pci_mem;
2176 }
2177
2172 memset(skge->mem, 0, skge->mem_size); 2178 memset(skge->mem, 0, skge->mem_size);
2173 2179
2174 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) 2180 err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2181 if (err)
2175 goto free_pci_mem; 2182 goto free_pci_mem;
2176 2183
2177 err = skge_rx_fill(skge); 2184 err = skge_rx_fill(skge);
2178 if (err) 2185 if (err)
2179 goto free_rx_ring; 2186 goto free_rx_ring;
2180 2187
2181 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2188 err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2182 skge->dma + rx_size))) 2189 skge->dma + rx_size);
2190 if (err)
2183 goto free_rx_ring; 2191 goto free_rx_ring;
2184 2192
2185 skge->tx_avail = skge->tx_ring.count - 1; 2193 skge->tx_avail = skge->tx_ring.count - 1;
2186 2194
2187 /* Enable IRQ from port */
2188 spin_lock_irq(&hw->hw_lock);
2189 hw->intr_mask |= portirqmask[port];
2190 skge_write32(hw, B0_IMSK, hw->intr_mask);
2191 spin_unlock_irq(&hw->hw_lock);
2192
2193 /* Initialize MAC */ 2195 /* Initialize MAC */
2194 spin_lock_bh(&hw->phy_lock); 2196 spin_lock_bh(&hw->phy_lock);
2195 if (hw->chip_id == CHIP_ID_GENESIS) 2197 if (hw->chip_id == CHIP_ID_GENESIS)
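
The new check in skge_up() rejects a pci_alloc_consistent() region whose start and end differ in their upper 32 address bits — consistent with skge_ring_alloc()'s base parameter narrowing from u64 to u32 above, since the descriptors hold only 32-bit next offsets. The test reduces to comparing the two addresses shifted right by 32; a standalone version of the condition (addresses hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if [base, base + size) straddles a 4 GiB boundary —
 * the condition the skge patch rejects for its descriptor ring. */
static int crosses_4g(uint64_t base, uint64_t size)
{
	return (base >> 32) != ((base + size) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g(0xfffff000ULL, 0x2000));  /* 1: crosses */
	printf("%d\n", crosses_4g(0x100000000ULL, 0x2000)); /* 0: fits    */
	return 0;
}
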
@@ -2246,11 +2248,6 @@ static int skge_down(struct net_device *dev)
2246 else 2248 else
2247 yukon_stop(skge); 2249 yukon_stop(skge);
2248 2250
2249 spin_lock_irq(&hw->hw_lock);
2250 hw->intr_mask &= ~portirqmask[skge->port];
2251 skge_write32(hw, B0_IMSK, hw->intr_mask);
2252 spin_unlock_irq(&hw->hw_lock);
2253
2254 /* Stop transmitter */ 2251 /* Stop transmitter */
2255 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2252 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2256 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2253 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2307,18 +2304,15 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2307 int i; 2304 int i;
2308 u32 control, len; 2305 u32 control, len;
2309 u64 map; 2306 u64 map;
2310 unsigned long flags;
2311 2307
2312 skb = skb_padto(skb, ETH_ZLEN); 2308 skb = skb_padto(skb, ETH_ZLEN);
2313 if (!skb) 2309 if (!skb)
2314 return NETDEV_TX_OK; 2310 return NETDEV_TX_OK;
2315 2311
2316 local_irq_save(flags);
2317 if (!spin_trylock(&skge->tx_lock)) { 2312 if (!spin_trylock(&skge->tx_lock)) {
2318 /* Collision - tell upper layer to requeue */ 2313 /* Collision - tell upper layer to requeue */
2319 local_irq_restore(flags); 2314 return NETDEV_TX_LOCKED;
2320 return NETDEV_TX_LOCKED; 2315 }
2321 }
2322 2316
2323 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2317 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2324 if (!netif_queue_stopped(dev)) { 2318 if (!netif_queue_stopped(dev)) {
@@ -2327,7 +2321,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2327 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 2321 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2328 dev->name); 2322 dev->name);
2329 } 2323 }
2330 spin_unlock_irqrestore(&skge->tx_lock, flags); 2324 spin_unlock(&skge->tx_lock);
2331 return NETDEV_TX_BUSY; 2325 return NETDEV_TX_BUSY;
2332 } 2326 }
2333 2327
@@ -2402,8 +2396,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2402 netif_stop_queue(dev); 2396 netif_stop_queue(dev);
2403 } 2397 }
2404 2398
2399 mmiowb();
2400 spin_unlock(&skge->tx_lock);
2401
2405 dev->trans_start = jiffies; 2402 dev->trans_start = jiffies;
2406 spin_unlock_irqrestore(&skge->tx_lock, flags);
2407 2403
2408 return NETDEV_TX_OK; 2404 return NETDEV_TX_OK;
2409} 2405}
@@ -2416,7 +2412,7 @@ static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2416 pci_unmap_addr(e, mapaddr), 2412 pci_unmap_addr(e, mapaddr),
2417 pci_unmap_len(e, maplen), 2413 pci_unmap_len(e, maplen),
2418 PCI_DMA_TODEVICE); 2414 PCI_DMA_TODEVICE);
2419 dev_kfree_skb_any(e->skb); 2415 dev_kfree_skb(e->skb);
2420 e->skb = NULL; 2416 e->skb = NULL;
2421 } else { 2417 } else {
2422 pci_unmap_page(hw->pdev, 2418 pci_unmap_page(hw->pdev,
@@ -2430,15 +2426,14 @@ static void skge_tx_clean(struct skge_port *skge)
2430{ 2426{
2431 struct skge_ring *ring = &skge->tx_ring; 2427 struct skge_ring *ring = &skge->tx_ring;
2432 struct skge_element *e; 2428 struct skge_element *e;
2433 unsigned long flags;
2434 2429
2435 spin_lock_irqsave(&skge->tx_lock, flags); 2430 spin_lock_bh(&skge->tx_lock);
2436 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 2431 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2437 ++skge->tx_avail; 2432 ++skge->tx_avail;
2438 skge_tx_free(skge->hw, e); 2433 skge_tx_free(skge->hw, e);
2439 } 2434 }
2440 ring->to_clean = e; 2435 ring->to_clean = e;
2441 spin_unlock_irqrestore(&skge->tx_lock, flags); 2436 spin_unlock_bh(&skge->tx_lock);
2442} 2437}
2443 2438
2444static void skge_tx_timeout(struct net_device *dev) 2439static void skge_tx_timeout(struct net_device *dev)
@@ -2663,6 +2658,37 @@ resubmit:
2663 return NULL; 2658 return NULL;
2664} 2659}
2665 2660
2661static void skge_tx_done(struct skge_port *skge)
2662{
2663 struct skge_ring *ring = &skge->tx_ring;
2664 struct skge_element *e;
2665
2666 spin_lock(&skge->tx_lock);
2667 for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
2668 struct skge_tx_desc *td = e->desc;
2669 u32 control;
2670
2671 rmb();
2672 control = td->control;
2673 if (control & BMU_OWN)
2674 break;
2675
2676 if (unlikely(netif_msg_tx_done(skge)))
2677 printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
2678 skge->netdev->name, e - ring->start, td->status);
2679
2680 skge_tx_free(skge->hw, e);
2681 e->skb = NULL;
2682 ++skge->tx_avail;
2683 }
2684 ring->to_clean = e;
2685 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2686
2687 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
2688 netif_wake_queue(skge->netdev);
2689
2690 spin_unlock(&skge->tx_lock);
2691}
2666 2692
2667static int skge_poll(struct net_device *dev, int *budget) 2693static int skge_poll(struct net_device *dev, int *budget)
2668{ 2694{
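The new skge_tx_done() above is the descriptor-ownership reclaim idiom: walk from to_clean toward to_use, stop at the first slot the adapter still owns, and only then advance the software pointer. A schematic version, with EX_OWN, ex_ring and free_tx_buffer() as assumed placeholders rather than skge definitions:

static void ex_reclaim(struct ex_ring *ring)
{
	struct ex_elem *e;

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		u32 control;

		rmb();				/* descriptor reads must not pass older ones */
		control = e->desc->control;
		if (control & EX_OWN)
			break;			/* hardware has not completed this slot */

		free_tx_buffer(e);		/* pci_unmap + free skb equivalent */
	}
	ring->to_clean = e;			/* everything before e is reusable */
}

Running this from the NAPI poll routine (rather than from a separate tx interrupt, as the removed skge_tx_intr() did) also lets the free path use plain dev_kfree_skb(), since poll always executes in softirq context.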
@@ -2670,8 +2696,10 @@ static int skge_poll(struct net_device *dev, int *budget)
2670 struct skge_hw *hw = skge->hw; 2696 struct skge_hw *hw = skge->hw;
2671 struct skge_ring *ring = &skge->rx_ring; 2697 struct skge_ring *ring = &skge->rx_ring;
2672 struct skge_element *e; 2698 struct skge_element *e;
2673 unsigned int to_do = min(dev->quota, *budget); 2699 int to_do = min(dev->quota, *budget);
2674 unsigned int work_done = 0; 2700 int work_done = 0;
2701
2702 skge_tx_done(skge);
2675 2703
2676 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { 2704 for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
2677 struct skge_rx_desc *rd = e->desc; 2705 struct skge_rx_desc *rd = e->desc;
@@ -2683,8 +2711,8 @@ static int skge_poll(struct net_device *dev, int *budget)
2683 if (control & BMU_OWN) 2711 if (control & BMU_OWN)
2684 break; 2712 break;
2685 2713
2686 skb = skge_rx_get(skge, e, control, rd->status, 2714 skb = skge_rx_get(skge, e, control, rd->status,
2687 le16_to_cpu(rd->csum2)); 2715 le16_to_cpu(rd->csum2));
2688 if (likely(skb)) { 2716 if (likely(skb)) {
2689 dev->last_rx = jiffies; 2717 dev->last_rx = jiffies;
2690 netif_receive_skb(skb); 2718 netif_receive_skb(skb);
@@ -2705,49 +2733,15 @@ static int skge_poll(struct net_device *dev, int *budget)
2705 if (work_done >= to_do) 2733 if (work_done >= to_do)
2706 return 1; /* not done */ 2734 return 1; /* not done */
2707 2735
2708 spin_lock_irq(&hw->hw_lock); 2736 netif_rx_complete(dev);
2709 __netif_rx_complete(dev); 2737 mmiowb();
2710 hw->intr_mask |= portirqmask[skge->port]; 2738
2739 hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
2711 skge_write32(hw, B0_IMSK, hw->intr_mask); 2740 skge_write32(hw, B0_IMSK, hw->intr_mask);
2712 spin_unlock_irq(&hw->hw_lock);
2713 2741
2714 return 0; 2742 return 0;
2715} 2743}
2716 2744
2717static inline void skge_tx_intr(struct net_device *dev)
2718{
2719 struct skge_port *skge = netdev_priv(dev);
2720 struct skge_hw *hw = skge->hw;
2721 struct skge_ring *ring = &skge->tx_ring;
2722 struct skge_element *e;
2723
2724 spin_lock(&skge->tx_lock);
2725 for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
2726 struct skge_tx_desc *td = e->desc;
2727 u32 control;
2728
2729 rmb();
2730 control = td->control;
2731 if (control & BMU_OWN)
2732 break;
2733
2734 if (unlikely(netif_msg_tx_done(skge)))
2735 printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
2736 dev->name, e - ring->start, td->status);
2737
2738 skge_tx_free(hw, e);
2739 e->skb = NULL;
2740 ++skge->tx_avail;
2741 }
2742 ring->to_clean = e;
2743 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2744
2745 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
2746 netif_wake_queue(dev);
2747
2748 spin_unlock(&skge->tx_lock);
2749}
2750
2751/* Parity errors seem to happen when Genesis is connected to a switch 2745/* Parity errors seem to happen when Genesis is connected to a switch
2752 * with no other ports present. Heartbeat error?? 2746 * with no other ports present. Heartbeat error??
2753 */ 2747 */
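The skge_poll() changes track the 2.6.16-era NAPI contract: consume at most min(dev->quota, *budget) packets, return 1 while work remains, and on completion call netif_rx_complete() before unmasking the device's receive sources. Schematically (the ex_* names are assumptions, not driver symbols):

static int ex_poll(struct net_device *dev, int *budget)
{
	struct ex_priv *p = netdev_priv(dev);
	int to_do = min(dev->quota, *budget);
	int work_done;

	work_done = ex_rx_process(p, to_do);	/* rx ring walk, bounded by to_do */

	*budget -= work_done;
	dev->quota -= work_done;
	if (work_done >= to_do)
		return 1;			/* still busy: stay on the poll list */

	netif_rx_complete(dev);			/* leave polling mode */
	ex_unmask_rx_irq(p);			/* rewrite the interrupt mask register */
	return 0;
}

Note the ordering: the device IRQ source is re-enabled only after netif_rx_complete(), so a fresh interrupt can legally reschedule polling.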
@@ -2770,17 +2764,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
2770 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 2764 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2771} 2765}
2772 2766
2773static void skge_pci_clear(struct skge_hw *hw)
2774{
2775 u16 status;
2776
2777 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2778 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2779 pci_write_config_word(hw->pdev, PCI_STATUS,
2780 status | PCI_STATUS_ERROR_BITS);
2781 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2782}
2783
2784static void skge_mac_intr(struct skge_hw *hw, int port) 2767static void skge_mac_intr(struct skge_hw *hw, int port)
2785{ 2768{
2786 if (hw->chip_id == CHIP_ID_GENESIS) 2769 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2822,23 +2805,39 @@ static void skge_error_irq(struct skge_hw *hw)
2822 if (hwstatus & IS_M2_PAR_ERR) 2805 if (hwstatus & IS_M2_PAR_ERR)
2823 skge_mac_parity(hw, 1); 2806 skge_mac_parity(hw, 1);
2824 2807
2825 if (hwstatus & IS_R1_PAR_ERR) 2808 if (hwstatus & IS_R1_PAR_ERR) {
2809 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2810 hw->dev[0]->name);
2826 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 2811 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
2812 }
2827 2813
2828 if (hwstatus & IS_R2_PAR_ERR) 2814 if (hwstatus & IS_R2_PAR_ERR) {
2815 printk(KERN_ERR PFX "%s: receive queue parity error\n",
2816 hw->dev[1]->name);
2829 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 2817 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
2818 }
2830 2819
2831 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 2820 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
2832 printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n", 2821 u16 pci_status, pci_cmd;
2833 hwstatus); 2822
2823 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
2824 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2834 2825
2835 skge_pci_clear(hw); 2826 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
2827 pci_name(hw->pdev), pci_cmd, pci_status);
2828
2829 /* Write the error bits back to clear them. */
2830 pci_status &= PCI_STATUS_ERROR_BITS;
2831 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2832 pci_write_config_word(hw->pdev, PCI_COMMAND,
2833 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
2834 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
2835 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2836 2836
2837 /* if error still set then just ignore it */ 2837 /* if error still set then just ignore it */
2838 hwstatus = skge_read32(hw, B0_HWE_ISRC); 2838 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2839 if (hwstatus & IS_IRQ_STAT) { 2839 if (hwstatus & IS_IRQ_STAT) {
2840 pr_debug("IRQ status %x: still set ignoring hardware errors\n", 2840 printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
2841 hwstatus);
2842 hw->intr_mask &= ~IS_HW_ERR; 2841 hw->intr_mask &= ~IS_HW_ERR;
2843 } 2842 }
2844 } 2843 }
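The expanded error path works because the PCI status register's error bits are write-one-to-clear: writing the observed bits back, rather than zeroing the register, acknowledges exactly the errors seen. A condensed sketch (PCI_STATUS_ERROR_BITS is the driver-local mask; the TST_CTRL1 bracketing in the code above is chip-specific config-write enabling):

static void ex_clear_pci_errors(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, PCI_STATUS, &status);
	/* RW1C semantics: setting an error bit clears it, 0 bits are untouched */
	pci_write_config_word(pdev, PCI_STATUS,
			      status | PCI_STATUS_ERROR_BITS);
}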
@@ -2855,12 +2854,11 @@ static void skge_extirq(unsigned long data)
2855 int port; 2854 int port;
2856 2855
2857 spin_lock(&hw->phy_lock); 2856 spin_lock(&hw->phy_lock);
2858 for (port = 0; port < 2; port++) { 2857 for (port = 0; port < hw->ports; port++) {
2859 struct net_device *dev = hw->dev[port]; 2858 struct net_device *dev = hw->dev[port];
2859 struct skge_port *skge = netdev_priv(dev);
2860 2860
2861 if (dev && netif_running(dev)) { 2861 if (netif_running(dev)) {
2862 struct skge_port *skge = netdev_priv(dev);
2863
2864 if (hw->chip_id != CHIP_ID_GENESIS) 2862 if (hw->chip_id != CHIP_ID_GENESIS)
2865 yukon_phy_intr(skge); 2863 yukon_phy_intr(skge);
2866 else 2864 else
@@ -2869,38 +2867,39 @@ static void skge_extirq(unsigned long data)
2869 } 2867 }
2870 spin_unlock(&hw->phy_lock); 2868 spin_unlock(&hw->phy_lock);
2871 2869
2872 spin_lock_irq(&hw->hw_lock);
2873 hw->intr_mask |= IS_EXT_REG; 2870 hw->intr_mask |= IS_EXT_REG;
2874 skge_write32(hw, B0_IMSK, hw->intr_mask); 2871 skge_write32(hw, B0_IMSK, hw->intr_mask);
2875 spin_unlock_irq(&hw->hw_lock);
2876} 2872}
2877 2873
2878static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) 2874static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2879{ 2875{
2880 struct skge_hw *hw = dev_id; 2876 struct skge_hw *hw = dev_id;
2881 u32 status = skge_read32(hw, B0_SP_ISRC); 2877 u32 status;
2882 2878
2883 if (status == 0 || status == ~0) /* hotplug or shared irq */ 2879 /* Reading this register masks the IRQ */
2880 status = skge_read32(hw, B0_SP_ISRC);
2881 if (status == 0)
2884 return IRQ_NONE; 2882 return IRQ_NONE;
2885 2883
2886 spin_lock(&hw->hw_lock); 2884 if (status & IS_EXT_REG) {
2887 if (status & IS_R1_F) { 2885 hw->intr_mask &= ~IS_EXT_REG;
2886 tasklet_schedule(&hw->ext_tasklet);
2887 }
2888
2889 if (status & (IS_R1_F|IS_XA1_F)) {
2888 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); 2890 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2889 hw->intr_mask &= ~IS_R1_F; 2891 hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
2890 netif_rx_schedule(hw->dev[0]); 2892 netif_rx_schedule(hw->dev[0]);
2891 } 2893 }
2892 2894
2893 if (status & IS_R2_F) { 2895 if (status & (IS_R2_F|IS_XA2_F)) {
2894 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); 2896 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
2895 hw->intr_mask &= ~IS_R2_F; 2897 hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
2896 netif_rx_schedule(hw->dev[1]); 2898 netif_rx_schedule(hw->dev[1]);
2897 } 2899 }
2898 2900
2899 if (status & IS_XA1_F) 2901 if (likely((status & hw->intr_mask) == 0))
2900 skge_tx_intr(hw->dev[0]); 2902 return IRQ_HANDLED;
2901
2902 if (status & IS_XA2_F)
2903 skge_tx_intr(hw->dev[1]);
2904 2903
2905 if (status & IS_PA_TO_RX1) { 2904 if (status & IS_PA_TO_RX1) {
2906 struct skge_port *skge = netdev_priv(hw->dev[0]); 2905 struct skge_port *skge = netdev_priv(hw->dev[0]);
@@ -2929,13 +2928,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2929 if (status & IS_HW_ERR) 2928 if (status & IS_HW_ERR)
2930 skge_error_irq(hw); 2929 skge_error_irq(hw);
2931 2930
2932 if (status & IS_EXT_REG) {
2933 hw->intr_mask &= ~IS_EXT_REG;
2934 tasklet_schedule(&hw->ext_tasklet);
2935 }
2936
2937 skge_write32(hw, B0_IMSK, hw->intr_mask); 2931 skge_write32(hw, B0_IMSK, hw->intr_mask);
2938 spin_unlock(&hw->hw_lock);
2939 2932
2940 return IRQ_HANDLED; 2933 return IRQ_HANDLED;
2941} 2934}
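The reworked skge_intr() illustrates the thin-ISR shape this patch moves both drivers toward: reading the interrupt source register masks the line, the handler only acknowledges and defers, and the hw_lock can be retired because the mask word is now touched from one context at a time. A schematic handler, with ex_* accessors and EX_RX_EVENT as placeholders:

static irqreturn_t ex_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct ex_hw *hw = dev_id;
	u32 status = ex_read_isr(hw);		/* read side effect: line masked */

	if (status == 0)
		return IRQ_NONE;		/* shared IRQ, not ours */

	if (status & EX_RX_EVENT) {
		hw->intr_mask &= ~EX_RX_EVENT;	/* poll() unmasks when done */
		netif_rx_schedule(hw->dev[0]);
	}

	ex_write_imask(hw, hw->intr_mask);	/* re-arm remaining sources */
	return IRQ_HANDLED;
}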
@@ -3010,7 +3003,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
3010static int skge_reset(struct skge_hw *hw) 3003static int skge_reset(struct skge_hw *hw)
3011{ 3004{
3012 u32 reg; 3005 u32 reg;
3013 u16 ctst; 3006 u16 ctst, pci_status;
3014 u8 t8, mac_cfg, pmd_type, phy_type; 3007 u8 t8, mac_cfg, pmd_type, phy_type;
3015 int i; 3008 int i;
3016 3009
@@ -3021,8 +3014,13 @@ static int skge_reset(struct skge_hw *hw)
3021 skge_write8(hw, B0_CTST, CS_RST_CLR); 3014 skge_write8(hw, B0_CTST, CS_RST_CLR);
3022 3015
3023 /* clear PCI errors, if any */ 3016 /* clear PCI errors, if any */
3024 skge_pci_clear(hw); 3017 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3018 skge_write8(hw, B2_TST_CTRL2, 0);
3025 3019
3020 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3021 pci_write_config_word(hw->pdev, PCI_STATUS,
3022 pci_status | PCI_STATUS_ERROR_BITS);
3023 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3026 skge_write8(hw, B0_CTST, CS_MRST_CLR); 3024 skge_write8(hw, B0_CTST, CS_MRST_CLR);
3027 3025
3028 /* restore CLK_RUN bits (for Yukon-Lite) */ 3026 /* restore CLK_RUN bits (for Yukon-Lite) */
@@ -3081,7 +3079,10 @@ static int skge_reset(struct skge_hw *hw)
3081 else 3079 else
3082 hw->ram_size = t8 * 4096; 3080 hw->ram_size = t8 * 4096;
3083 3081
3084 hw->intr_mask = IS_HW_ERR | IS_EXT_REG; 3082 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3083 if (hw->ports > 1)
3084 hw->intr_mask |= IS_PORT_2;
3085
3085 if (hw->chip_id == CHIP_ID_GENESIS) 3086 if (hw->chip_id == CHIP_ID_GENESIS)
3086 genesis_init(hw); 3087 genesis_init(hw);
3087 else { 3088 else {
@@ -3251,13 +3252,15 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3251 struct skge_hw *hw; 3252 struct skge_hw *hw;
3252 int err, using_dac = 0; 3253 int err, using_dac = 0;
3253 3254
3254 if ((err = pci_enable_device(pdev))) { 3255 err = pci_enable_device(pdev);
3256 if (err) {
3255 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3257 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3256 pci_name(pdev)); 3258 pci_name(pdev));
3257 goto err_out; 3259 goto err_out;
3258 } 3260 }
3259 3261
3260 if ((err = pci_request_regions(pdev, DRV_NAME))) { 3262 err = pci_request_regions(pdev, DRV_NAME);
3263 if (err) {
3261 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3264 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3262 pci_name(pdev)); 3265 pci_name(pdev));
3263 goto err_out_disable_pdev; 3266 goto err_out_disable_pdev;
@@ -3265,22 +3268,18 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3265 3268
3266 pci_set_master(pdev); 3269 pci_set_master(pdev);
3267 3270
3268 if (sizeof(dma_addr_t) > sizeof(u32) && 3271 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3269 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3270 using_dac = 1; 3272 using_dac = 1;
3271 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3273 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3272 if (err < 0) { 3274 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3273 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " 3275 using_dac = 0;
3274 "for consistent allocations\n", pci_name(pdev)); 3276 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3275 goto err_out_free_regions; 3277 }
3276 } 3278
3277 } else { 3279 if (err) {
3278 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3280 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3279 if (err) { 3281 pci_name(pdev));
3280 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3282 goto err_out_free_regions;
3281 pci_name(pdev));
3282 goto err_out_free_regions;
3283 }
3284 } 3283 }
3285 3284
3286#ifdef __BIG_ENDIAN 3285#ifdef __BIG_ENDIAN
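The probe rewrite above is the standard two-step DMA negotiation; the old sizeof(dma_addr_t) guard was unnecessary since pci_set_dma_mask() itself fails on platforms where 64-bit addressing cannot work. In outline (matching the new code, surrounding labels assumed):

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		using_dac = 1;			/* 64-bit streaming DMA available */
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		using_dac = 0;
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}
	if (err)
		goto err_out_free_regions;	/* no usable DMA configuration */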
@@ -3304,7 +3303,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3304 3303
3305 hw->pdev = pdev; 3304 hw->pdev = pdev;
3306 spin_lock_init(&hw->phy_lock); 3305 spin_lock_init(&hw->phy_lock);
3307 spin_lock_init(&hw->hw_lock);
3308 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); 3306 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3309 3307
3310 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3308 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
@@ -3314,7 +3312,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3314 goto err_out_free_hw; 3312 goto err_out_free_hw;
3315 } 3313 }
3316 3314
3317 if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) { 3315 err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
3316 if (err) {
3318 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3317 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3319 pci_name(pdev), pdev->irq); 3318 pci_name(pdev), pdev->irq);
3320 goto err_out_iounmap; 3319 goto err_out_iounmap;
@@ -3332,7 +3331,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3332 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) 3331 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3333 goto err_out_led_off; 3332 goto err_out_led_off;
3334 3333
3335 if ((err = register_netdev(dev))) { 3334 err = register_netdev(dev);
3335 if (err) {
3336 printk(KERN_ERR PFX "%s: cannot register net device\n", 3336 printk(KERN_ERR PFX "%s: cannot register net device\n",
3337 pci_name(pdev)); 3337 pci_name(pdev));
3338 goto err_out_free_netdev; 3338 goto err_out_free_netdev;
@@ -3387,7 +3387,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3387 3387
3388 skge_write32(hw, B0_IMSK, 0); 3388 skge_write32(hw, B0_IMSK, 0);
3389 skge_write16(hw, B0_LED, LED_STAT_OFF); 3389 skge_write16(hw, B0_LED, LED_STAT_OFF);
3390 skge_pci_clear(hw);
3391 skge_write8(hw, B0_CTST, CS_RST_SET); 3390 skge_write8(hw, B0_CTST, CS_RST_SET);
3392 3391
3393 tasklet_kill(&hw->ext_tasklet); 3392 tasklet_kill(&hw->ext_tasklet);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 941f12a333b6..2efdacc290e5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,7 +2402,6 @@ struct skge_hw {
2402 2402
2403 struct tasklet_struct ext_tasklet; 2403 struct tasklet_struct ext_tasklet;
2404 spinlock_t phy_lock; 2404 spinlock_t phy_lock;
2405 spinlock_t hw_lock;
2406}; 2405};
2407 2406
2408enum { 2407enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 73260364cba3..f08fe6c884b2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "0.15" 54#define DRV_VERSION "1.1"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -61,10 +61,6 @@
61 * a receive requires one (or two if using 64 bit dma). 61 * a receive requires one (or two if using 64 bit dma).
62 */ 62 */
63 63
64#define is_ec_a1(hw) \
65 unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
66 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
67
68#define RX_LE_SIZE 512 64#define RX_LE_SIZE 512
69#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 65#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
70#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) 66#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
@@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256;
96module_param(copybreak, int, 0); 92module_param(copybreak, int, 0);
97MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 93MODULE_PARM_DESC(copybreak, "Receive copy threshold");
98 94
95static int disable_msi = 0;
96module_param(disable_msi, int, 0);
97MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
98
99static const struct pci_device_id sky2_id_table[] = { 99static const struct pci_device_id sky2_id_table[] = {
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
504/* Force a renegotiation */ 504/* Force a renegotiation */
505static void sky2_phy_reinit(struct sky2_port *sky2) 505static void sky2_phy_reinit(struct sky2_port *sky2)
506{ 506{
507 down(&sky2->phy_sema); 507 spin_lock_bh(&sky2->phy_lock);
508 sky2_phy_init(sky2->hw, sky2->port); 508 sky2_phy_init(sky2->hw, sky2->port);
509 up(&sky2->phy_sema); 509 spin_unlock_bh(&sky2->phy_lock);
510} 510}
511 511
512static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 512static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
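The phy_sema-to-phy_lock conversion in this and the following hunks follows from moving PHY interrupt handling into the NAPI poll path: softirq context may not sleep, so a semaphore is no longer legal there. Process-context callers take the BH-disabling variant so they cannot be preempted by the softirq user. A sketch of the two call sites (a spinlock is acceptable here because the PHY register helpers busy-wait rather than sleep):

	/* process context (ethtool, ioctl, reinit): */
	spin_lock_bh(&sky2->phy_lock);
	err = __gm_phy_read(hw, port, reg, &val);
	spin_unlock_bh(&sky2->phy_lock);

	/* softirq context (NAPI poll -> sky2_phy_intr): */
	spin_lock(&sky2->phy_lock);
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	spin_unlock(&sky2->phy_lock);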
@@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
571 571
572 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); 572 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
573 573
574 down(&sky2->phy_sema); 574 spin_lock_bh(&sky2->phy_lock);
575 sky2_phy_init(hw, port); 575 sky2_phy_init(hw, port);
576 up(&sky2->phy_sema); 576 spin_unlock_bh(&sky2->phy_lock);
577 577
578 /* MIB clear */ 578 /* MIB clear */
579 reg = gma_read16(hw, port, GM_PHY_ADDR); 579 reg = gma_read16(hw, port, GM_PHY_ADDR);
@@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
725 return le; 725 return le;
726} 726}
727 727
728/* 728/* Update chip's next pointer */
729 * This is a workaround code taken from SysKonnect sk98lin driver 729static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
730 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
731 */
732static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
733 u16 idx, u16 *last, u16 size)
734{ 730{
735 wmb(); 731 wmb();
736 if (is_ec_a1(hw) && idx < *last) { 732 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
737 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
738
739 if (hwget == 0) {
740 /* Start prefetching again */
741 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
742 goto setnew;
743 }
744
745 if (hwget == size - 1) {
746 /* set watermark to one list element */
747 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
748
749 /* set put index to first list element */
750 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
751 } else /* have hardware go to end of list */
752 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
753 size - 1);
754 } else {
755setnew:
756 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
757 }
758 *last = idx;
759 mmiowb(); 733 mmiowb();
760} 734}
761 735
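With the Yukon EC rev A1 wraparound workaround dropped, sky2_put_idx() reduces to the generic producer-doorbell idiom: make the descriptor writes globally visible, then tell the chip the new put index. The two barriers do different jobs, as this annotated restatement shows (ex_write16() is a hypothetical MMIO accessor):

static inline void ex_put_idx(struct ex_hw *hw, unsigned q, u16 idx)
{
	wmb();					/* descriptors reach memory before doorbell */
	ex_write16(hw, EX_PUT_IDX(q), idx);	/* doorbell: new producer index */
	mmiowb();				/* order MMIO vs. a following spin_unlock */
}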
@@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
878 if (!netif_running(dev)) 852 if (!netif_running(dev))
879 return -ENODEV; /* Phy still in reset */ 853 return -ENODEV; /* Phy still in reset */
880 854
881 switch(cmd) { 855 switch (cmd) {
882 case SIOCGMIIPHY: 856 case SIOCGMIIPHY:
883 data->phy_id = PHY_ADDR_MARV; 857 data->phy_id = PHY_ADDR_MARV;
884 858
@@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
886 case SIOCGMIIREG: { 860 case SIOCGMIIREG: {
887 u16 val = 0; 861 u16 val = 0;
888 862
889 down(&sky2->phy_sema); 863 spin_lock_bh(&sky2->phy_lock);
890 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); 864 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
891 up(&sky2->phy_sema); 865 spin_unlock_bh(&sky2->phy_lock);
892 866
893 data->val_out = val; 867 data->val_out = val;
894 break; 868 break;
@@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
898 if (!capable(CAP_NET_ADMIN)) 872 if (!capable(CAP_NET_ADMIN))
899 return -EPERM; 873 return -EPERM;
900 874
901 down(&sky2->phy_sema); 875 spin_lock_bh(&sky2->phy_lock);
902 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, 876 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
903 data->val_in); 877 data->val_in);
904 up(&sky2->phy_sema); 878 spin_unlock_bh(&sky2->phy_lock);
905 break; 879 break;
906 } 880 }
907 return err; 881 return err;
@@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
1001 975
1002 /* Tell chip about available buffers */ 976 /* Tell chip about available buffers */
1003 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); 977 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
1004 sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
1005 return 0; 978 return 0;
1006nomem: 979nomem:
1007 sky2_rx_clean(sky2); 980 sky2_rx_clean(sky2);
@@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev)
1014 struct sky2_port *sky2 = netdev_priv(dev); 987 struct sky2_port *sky2 = netdev_priv(dev);
1015 struct sky2_hw *hw = sky2->hw; 988 struct sky2_hw *hw = sky2->hw;
1016 unsigned port = sky2->port; 989 unsigned port = sky2->port;
1017 u32 ramsize, rxspace; 990 u32 ramsize, rxspace, imask;
1018 int err = -ENOMEM; 991 int err = -ENOMEM;
1019 992
1020 if (netif_msg_ifup(sky2)) 993 if (netif_msg_ifup(sky2))
@@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev)
1079 goto err_out; 1052 goto err_out;
1080 1053
1081 /* Enable interrupts from phy/mac for port */ 1054 /* Enable interrupts from phy/mac for port */
1082 spin_lock_irq(&hw->hw_lock); 1055 imask = sky2_read32(hw, B0_IMSK);
1083 hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1056 imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
1084 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1057 sky2_write32(hw, B0_IMSK, imask);
1085 spin_unlock_irq(&hw->hw_lock); 1058
1086 return 0; 1059 return 0;
1087 1060
1088err_out: 1061err_out:
@@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1299 netif_stop_queue(dev); 1272 netif_stop_queue(dev);
1300 } 1273 }
1301 1274
1302 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, 1275 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1303 &sky2->tx_last_put, TX_RING_SIZE);
1304 1276
1305out_unlock: 1277out_unlock:
1306 spin_unlock(&sky2->tx_lock); 1278 spin_unlock(&sky2->tx_lock);
@@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1332 struct tx_ring_info *re = sky2->tx_ring + put; 1304 struct tx_ring_info *re = sky2->tx_ring + put;
1333 struct sk_buff *skb = re->skb; 1305 struct sk_buff *skb = re->skb;
1334 1306
1335 nxt = re->idx; 1307 nxt = re->idx;
1336 BUG_ON(nxt >= TX_RING_SIZE); 1308 BUG_ON(nxt >= TX_RING_SIZE);
1337 prefetch(sky2->tx_ring + nxt); 1309 prefetch(sky2->tx_ring + nxt);
1338 1310
@@ -1348,7 +1320,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1348 struct tx_ring_info *fre; 1320 struct tx_ring_info *fre;
1349 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; 1321 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
1350 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), 1322 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1351 skb_shinfo(skb)->frags[i].size, 1323 skb_shinfo(skb)->frags[i].size,
1352 PCI_DMA_TODEVICE); 1324 PCI_DMA_TODEVICE);
1353 } 1325 }
1354 1326
@@ -1356,7 +1328,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1356 } 1328 }
1357 1329
1358 sky2->tx_cons = put; 1330 sky2->tx_cons = put;
1359 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) 1331 if (tx_avail(sky2) > MAX_SKB_TX_LE)
1360 netif_wake_queue(dev); 1332 netif_wake_queue(dev);
1361} 1333}
1362 1334
@@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev)
1375 struct sky2_hw *hw = sky2->hw; 1347 struct sky2_hw *hw = sky2->hw;
1376 unsigned port = sky2->port; 1348 unsigned port = sky2->port;
1377 u16 ctrl; 1349 u16 ctrl;
1350 u32 imask;
1378 1351
1379 /* Never really got started! */ 1352 /* Never really got started! */
1380 if (!sky2->tx_le) 1353 if (!sky2->tx_le)
@@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev)
1386 /* Stop more packets from being queued */ 1359 /* Stop more packets from being queued */
1387 netif_stop_queue(dev); 1360 netif_stop_queue(dev);
1388 1361
1389 /* Disable port IRQ */
1390 spin_lock_irq(&hw->hw_lock);
1391 hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
1392 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1393 spin_unlock_irq(&hw->hw_lock);
1394
1395 flush_scheduled_work();
1396
1397 sky2_phy_reset(hw, port); 1362 sky2_phy_reset(hw, port);
1398 1363
1399 /* Stop transmitter */ 1364 /* Stop transmitter */
@@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev)
1437 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1402 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1438 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1403 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1439 1404
1405 /* Disable port IRQ */
1406 imask = sky2_read32(hw, B0_IMSK);
1407 imask &= ~((sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2);
1408 sky2_write32(hw, B0_IMSK, imask);
1409
1440 /* turn off LED's */ 1410 /* turn off LED's */
1441 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 1411 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1442 1412
@@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1631 return 0; 1601 return 0;
1632} 1602}
1633 1603
1634/* 1604/* Interrupt from PHY */
1635 * Interrupt from PHY are handled outside of interrupt context 1605static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1636 * because accessing phy registers requires spin wait which might
1637 * cause excess interrupt latency.
1638 */
1639static void sky2_phy_task(void *arg)
1640{ 1606{
1641 struct sky2_port *sky2 = arg; 1607 struct net_device *dev = hw->dev[port];
1642 struct sky2_hw *hw = sky2->hw; 1608 struct sky2_port *sky2 = netdev_priv(dev);
1643 u16 istatus, phystat; 1609 u16 istatus, phystat;
1644 1610
1645 down(&sky2->phy_sema); 1611 spin_lock(&sky2->phy_lock);
1646 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT); 1612 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1647 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT); 1613 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1614
1615 if (!netif_running(dev))
1616 goto out;
1648 1617
1649 if (netif_msg_intr(sky2)) 1618 if (netif_msg_intr(sky2))
1650 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", 1619 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
@@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg)
1670 sky2_link_down(sky2); 1639 sky2_link_down(sky2);
1671 } 1640 }
1672out: 1641out:
1673 up(&sky2->phy_sema); 1642 spin_unlock(&sky2->phy_lock);
1674
1675 spin_lock_irq(&hw->hw_lock);
1676 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1677 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1678 spin_unlock_irq(&hw->hw_lock);
1679} 1643}
1680 1644
1681 1645
@@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev)
1687 struct sky2_port *sky2 = netdev_priv(dev); 1651 struct sky2_port *sky2 = netdev_priv(dev);
1688 struct sky2_hw *hw = sky2->hw; 1652 struct sky2_hw *hw = sky2->hw;
1689 unsigned txq = txqaddr[sky2->port]; 1653 unsigned txq = txqaddr[sky2->port];
1690 u16 ridx; 1654 u16 report, done;
1691
1692 /* Maybe we just missed an status interrupt */
1693 spin_lock(&sky2->tx_lock);
1694 ridx = sky2_read16(hw,
1695 sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1696 sky2_tx_complete(sky2, ridx);
1697 spin_unlock(&sky2->tx_lock);
1698
1699 if (!netif_queue_stopped(dev)) {
1700 if (net_ratelimit())
1701 pr_info(PFX "transmit interrupt missed? recovered\n");
1702 return;
1703 }
1704 1655
1705 if (netif_msg_timer(sky2)) 1656 if (netif_msg_timer(sky2))
1706 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1657 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1707 1658
1708 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1659 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1709 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1660 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1710 1661
1711 sky2_tx_clean(sky2); 1662 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1663 dev->name,
1664 sky2->tx_cons, sky2->tx_prod, report, done);
1712 1665
1713 sky2_qset(hw, txq); 1666 if (report != done) {
1714 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); 1667 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
1668
1669 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1670 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1671 } else if (report != sky2->tx_cons) {
1672 printk(KERN_INFO PFX "status report lost?\n");
1673
1674 spin_lock_bh(&sky2->tx_lock);
1675 sky2_tx_complete(sky2, report);
1676 spin_unlock_bh(&sky2->tx_lock);
1677 } else {
1678 printk(KERN_INFO PFX "hardware hung? flushing\n");
1679
1680 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
1681 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1682
1683 sky2_tx_clean(sky2);
1684
1685 sky2_qset(hw, txq);
1686 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1687 }
1715} 1688}
1716 1689
1717 1690
@@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1730 struct sky2_hw *hw = sky2->hw; 1703 struct sky2_hw *hw = sky2->hw;
1731 int err; 1704 int err;
1732 u16 ctl, mode; 1705 u16 ctl, mode;
1706 u32 imask;
1733 1707
1734 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1708 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1735 return -EINVAL; 1709 return -EINVAL;
@@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1742 return 0; 1716 return 0;
1743 } 1717 }
1744 1718
1719 imask = sky2_read32(hw, B0_IMSK);
1745 sky2_write32(hw, B0_IMSK, 0); 1720 sky2_write32(hw, B0_IMSK, 0);
1746 1721
1747 dev->trans_start = jiffies; /* prevent tx timeout */ 1722 dev->trans_start = jiffies; /* prevent tx timeout */
1748 netif_stop_queue(dev); 1723 netif_stop_queue(dev);
1749 netif_poll_disable(hw->dev[0]); 1724 netif_poll_disable(hw->dev[0]);
1750 1725
1726 synchronize_irq(hw->pdev->irq);
1727
1751 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); 1728 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
1752 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); 1729 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1753 sky2_rx_stop(sky2); 1730 sky2_rx_stop(sky2);
@@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1766 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); 1743 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1767 1744
1768 err = sky2_rx_start(sky2); 1745 err = sky2_rx_start(sky2);
1769 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1746 sky2_write32(hw, B0_IMSK, imask);
1770 1747
1771 if (err) 1748 if (err)
1772 dev_close(dev); 1749 dev_close(dev);
@@ -1843,8 +1820,7 @@ resubmit:
1843 sky2_rx_add(sky2, re->mapaddr); 1820 sky2_rx_add(sky2, re->mapaddr);
1844 1821
1845 /* Tell receiver about new buffers. */ 1822 /* Tell receiver about new buffers. */
1846 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put, 1823 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
1847 &sky2->rx_last_put, RX_LE_SIZE);
1848 1824
1849 return skb; 1825 return skb;
1850 1826
@@ -1871,76 +1847,51 @@ error:
1871 goto resubmit; 1847 goto resubmit;
1872} 1848}
1873 1849
1874/* 1850/* Transmit complete */
1875 * Check for transmit complete 1851static inline void sky2_tx_done(struct net_device *dev, u16 last)
1876 */
1877#define TX_NO_STATUS 0xffff
1878
1879static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
1880{ 1852{
1881 if (last != TX_NO_STATUS) { 1853 struct sky2_port *sky2 = netdev_priv(dev);
1882 struct net_device *dev = hw->dev[port];
1883 if (dev && netif_running(dev)) {
1884 struct sky2_port *sky2 = netdev_priv(dev);
1885 1854
1886 spin_lock(&sky2->tx_lock); 1855 if (netif_running(dev)) {
1887 sky2_tx_complete(sky2, last); 1856 spin_lock(&sky2->tx_lock);
1888 spin_unlock(&sky2->tx_lock); 1857 sky2_tx_complete(sky2, last);
1889 } 1858 spin_unlock(&sky2->tx_lock);
1890 } 1859 }
1891} 1860}
1892 1861
1893/* 1862/* Process status response ring */
1894 * Both ports share the same status interrupt, therefore there is only 1863static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1895 * one poll routine.
1896 */
1897static int sky2_poll(struct net_device *dev0, int *budget)
1898{ 1864{
1899 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; 1865 int work_done = 0;
1900 unsigned int to_do = min(dev0->quota, *budget);
1901 unsigned int work_done = 0;
1902 u16 hwidx;
1903 u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
1904
1905 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1906
1907 /*
1908 * Kick the STAT_LEV_TIMER_CTRL timer.
1909 * This fixes my hangs on Yukon-EC (0xb6) rev 1.
1910 * The if clause is there to start the timer only if it has been
1911 * configured correctly and not been disabled via ethtool.
1912 */
1913 if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
1914 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
1915 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
1916 }
1917 1866
1918 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1919 BUG_ON(hwidx >= STATUS_RING_SIZE);
1920 rmb(); 1867 rmb();
1921 1868
1922 while (hwidx != hw->st_idx) { 1869 for (;;) {
1923 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1870 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1924 struct net_device *dev; 1871 struct net_device *dev;
1925 struct sky2_port *sky2; 1872 struct sky2_port *sky2;
1926 struct sk_buff *skb; 1873 struct sk_buff *skb;
1927 u32 status; 1874 u32 status;
1928 u16 length; 1875 u16 length;
1876 u8 link, opcode;
1877
1878 opcode = le->opcode;
1879 if (!opcode)
1880 break;
1881 opcode &= ~HW_OWNER;
1929 1882
1930 le = hw->st_le + hw->st_idx;
1931 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1883 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1932 prefetch(hw->st_le + hw->st_idx); 1884 le->opcode = 0;
1933 1885
1934 BUG_ON(le->link >= 2); 1886 link = le->link;
1935 dev = hw->dev[le->link]; 1887 BUG_ON(link >= 2);
1936 if (dev == NULL || !netif_running(dev)) 1888 dev = hw->dev[link];
1937 continue;
1938 1889
1939 sky2 = netdev_priv(dev); 1890 sky2 = netdev_priv(dev);
1940 status = le32_to_cpu(le->status); 1891 length = le->length;
1941 length = le16_to_cpu(le->length); 1892 status = le->status;
1942 1893
1943 switch (le->opcode & ~HW_OWNER) { 1894 switch (opcode) {
1944 case OP_RXSTAT: 1895 case OP_RXSTAT:
1945 skb = sky2_receive(sky2, length, status); 1896 skb = sky2_receive(sky2, length, status);
1946 if (!skb) 1897 if (!skb)
@@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget)
1980 1931
1981 case OP_TXINDEXLE: 1932 case OP_TXINDEXLE:
1982 /* TX index reports status for both ports */ 1933 /* TX index reports status for both ports */
1983 tx_done[0] = status & 0xffff; 1934 sky2_tx_done(hw->dev[0], status & 0xffff);
1984 tx_done[1] = ((status >> 24) & 0xff) 1935 if (hw->dev[1])
1985 | (u16)(length & 0xf) << 8; 1936 sky2_tx_done(hw->dev[1],
1937 ((status >> 24) & 0xff)
1938 | (u16)(length & 0xf) << 8);
1986 break; 1939 break;
1987 1940
1988 default: 1941 default:
1989 if (net_ratelimit()) 1942 if (net_ratelimit())
1990 printk(KERN_WARNING PFX 1943 printk(KERN_WARNING PFX
1991 "unknown status opcode 0x%x\n", le->opcode); 1944 "unknown status opcode 0x%x\n", opcode);
1992 break; 1945 break;
1993 } 1946 }
1994 } 1947 }
1995 1948
1996exit_loop: 1949exit_loop:
1997 sky2_tx_check(hw, 0, tx_done[0]); 1950 return work_done;
1998 sky2_tx_check(hw, 1, tx_done[1]);
1999
2000 if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
2001 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2002 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2003 }
2004
2005 if (likely(work_done < to_do)) {
2006 spin_lock_irq(&hw->hw_lock);
2007 __netif_rx_complete(dev0);
2008
2009 hw->intr_mask |= Y2_IS_STAT_BMU;
2010 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2011 spin_unlock_irq(&hw->hw_lock);
2012
2013 return 0;
2014 } else {
2015 *budget -= work_done;
2016 dev0->quota -= work_done;
2017 return 1;
2018 }
2019} 1951}
2020 1952
2021static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) 1953static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
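The new sky2_status_intr() above shows the opcode-owned ring style: instead of comparing against a hardware put index each pass, the consumer treats a nonzero opcode byte as "entry valid" and zeroes it once consumed, so a stale slot can never be re-processed. Schematically, with ex_dispatch() standing in for the OP_RXSTAT/OP_TXINDEXLE switch:

static int ex_status_intr(struct ex_hw *hw, int to_do)
{
	int work_done = 0;

	rmb();					/* see entries posted before the IRQ */
	while (work_done < to_do) {
		struct ex_status_le *le = hw->st_le + hw->st_idx;
		u8 opcode = le->opcode;

		if (!opcode)
			break;			/* chip has not filled this slot */

		hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
		le->opcode = 0;			/* mark consumed for the next lap */

		work_done += ex_dispatch(hw, opcode & ~HW_OWNER, le);
	}
	return work_done;
}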
@@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2134 } 2066 }
2135} 2067}
2136 2068
2137static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) 2069/* This should never happen; it is a fatal situation */
2070static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2071 const char *rxtx, u32 mask)
2138{ 2072{
2139 struct net_device *dev = hw->dev[port]; 2073 struct net_device *dev = hw->dev[port];
2140 struct sky2_port *sky2 = netdev_priv(dev); 2074 struct sky2_port *sky2 = netdev_priv(dev);
2075 u32 imask;
2076
2077 printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
2078 dev ? dev->name : "<not registered>", rxtx);
2141 2079
2142 hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); 2080 imask = sky2_read32(hw, B0_IMSK);
2143 sky2_write32(hw, B0_IMSK, hw->intr_mask); 2081 imask &= ~mask;
2082 sky2_write32(hw, B0_IMSK, imask);
2144 2083
2145 schedule_work(&sky2->phy_task); 2084 if (dev) {
2085 spin_lock(&sky2->phy_lock);
2086 sky2_link_down(sky2);
2087 spin_unlock(&sky2->phy_lock);
2088 }
2146} 2089}
2147 2090
2148static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) 2091static int sky2_poll(struct net_device *dev0, int *budget)
2149{ 2092{
2150 struct sky2_hw *hw = dev_id; 2093 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
2151 struct net_device *dev0 = hw->dev[0]; 2094 int work_limit = min(dev0->quota, *budget);
2152 u32 status; 2095 int work_done = 0;
2096 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2153 2097
2154 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 2098 if (unlikely(status & ~Y2_IS_STAT_BMU)) {
2155 if (status == 0 || status == ~0) 2099 if (status & Y2_IS_HW_ERR)
2156 return IRQ_NONE; 2100 sky2_hw_intr(hw);
2157 2101
2158 spin_lock(&hw->hw_lock); 2102 if (status & Y2_IS_IRQ_PHY1)
2159 if (status & Y2_IS_HW_ERR) 2103 sky2_phy_intr(hw, 0);
2160 sky2_hw_intr(hw);
2161 2104
2162 /* Do NAPI for Rx and Tx status */ 2105 if (status & Y2_IS_IRQ_PHY2)
2163 if (status & Y2_IS_STAT_BMU) { 2106 sky2_phy_intr(hw, 1);
2164 hw->intr_mask &= ~Y2_IS_STAT_BMU;
2165 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2166 2107
2167 if (likely(__netif_rx_schedule_prep(dev0))) { 2108 if (status & Y2_IS_IRQ_MAC1)
2168 prefetch(&hw->st_le[hw->st_idx]); 2109 sky2_mac_intr(hw, 0);
2169 __netif_rx_schedule(dev0); 2110
2170 } 2111 if (status & Y2_IS_IRQ_MAC2)
2112 sky2_mac_intr(hw, 1);
2113
2114 if (status & Y2_IS_CHK_RX1)
2115 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2116
2117 if (status & Y2_IS_CHK_RX2)
2118 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2119
2120 if (status & Y2_IS_CHK_TXA1)
2121 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2122
2123 if (status & Y2_IS_CHK_TXA2)
2124 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2171 } 2125 }
2172 2126
2173 if (status & Y2_IS_IRQ_PHY1) 2127 if (status & Y2_IS_STAT_BMU) {
2174 sky2_phy_intr(hw, 0); 2128 work_done = sky2_status_intr(hw, work_limit);
2129 *budget -= work_done;
2130 dev0->quota -= work_done;
2131
2132 if (work_done >= work_limit)
2133 return 1;
2175 2134
2176 if (status & Y2_IS_IRQ_PHY2) 2135 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2177 sky2_phy_intr(hw, 1); 2136 }
2178 2137
2179 if (status & Y2_IS_IRQ_MAC1) 2138 netif_rx_complete(dev0);
2180 sky2_mac_intr(hw, 0);
2181 2139
2182 if (status & Y2_IS_IRQ_MAC2) 2140 status = sky2_read32(hw, B0_Y2_SP_LISR);
2183 sky2_mac_intr(hw, 1); 2141 return 0;
2142}
2184 2143
2185 sky2_write32(hw, B0_Y2_SP_ICR, 2); 2144static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2145{
2146 struct sky2_hw *hw = dev_id;
2147 struct net_device *dev0 = hw->dev[0];
2148 u32 status;
2186 2149
2187 spin_unlock(&hw->hw_lock); 2150 /* Reading this register masks interrupts as a side effect */
2151 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2152 if (status == 0 || status == ~0)
2153 return IRQ_NONE;
2154
2155 prefetch(&hw->st_le[hw->st_idx]);
2156 if (likely(__netif_rx_schedule_prep(dev0)))
2157 __netif_rx_schedule(dev0);
2158 else
2159 printk(KERN_DEBUG PFX "irq race detected\n");
2188 2160
2189 return IRQ_HANDLED; 2161 return IRQ_HANDLED;
2190} 2162}
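sky2_descriptor_error() above applies the usual rule for unserviceable interrupt sources: mask the bit so it cannot storm, log once, and degrade the affected port. The mask-off step in isolation:

	u32 imask = sky2_read32(hw, B0_IMSK);

	imask &= ~mask;			/* source stays masked; no recovery path */
	sky2_write32(hw, B0_IMSK, imask);

Since the poll routine and this error path now read-modify-write B0_IMSK from softirq context only, the old hw_lock and intr_mask shadow copy could be removed.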
@@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw)
2238 return -EOPNOTSUPP; 2210 return -EOPNOTSUPP;
2239 } 2211 }
2240 2212
2213 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2214
2215 /* This rev is really old, and requires untested workarounds */
2216 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2217 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
2218 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2219 hw->chip_id, hw->chip_rev);
2220 return -EOPNOTSUPP;
2221 }
2222
2223 /* This chip is new and not tested yet */
2224 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
2225 pr_info(PFX "%s: this version of the Yukon 2 chipset has not been tested yet.\n",
2226 pci_name(hw->pdev));
2227 pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
2228 }
2229
2241 /* disable ASF */ 2230 /* disable ASF */
2242 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2231 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2243 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2232 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw)
2258 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2247 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2259 2248
2260 /* clear any PEX errors */ 2249 /* clear any PEX errors */
2261 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) 2250 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2262 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2251 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2263 2252
2264 2253
@@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw)
2271 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 2260 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2272 ++hw->ports; 2261 ++hw->ports;
2273 } 2262 }
2274 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2275 2263
2276 sky2_set_power_state(hw, PCI_D0); 2264 sky2_set_power_state(hw, PCI_D0);
2277 2265
@@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw)
2337 /* Set the list last index */ 2325 /* Set the list last index */
2338 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); 2326 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2339 2327
2340 /* These status setup values are copied from SysKonnect's driver */ 2328 sky2_write16(hw, STAT_TX_IDX_TH, 10);
2341 if (is_ec_a1(hw)) { 2329 sky2_write8(hw, STAT_FIFO_WM, 16);
2342 /* WA for dev. #4.3 */
2343 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
2344
2345 /* set Status-FIFO watermark */
2346 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
2347 2330
2348 /* set Status-FIFO ISR watermark */ 2331 /* set Status-FIFO ISR watermark */
2349 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */ 2332 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2350 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000)); 2333 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2351 } else { 2334 else
2352 sky2_write16(hw, STAT_TX_IDX_TH, 10); 2335 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2353 sky2_write8(hw, STAT_FIFO_WM, 16);
2354
2355 /* set Status-FIFO ISR watermark */
2356 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2357 sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2358 else
2359 sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2360 2336
2361 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); 2337 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2362 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); 2338 sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2363 } 2339 sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2364 2340
2365 /* enable status unit */ 2341 /* enable status unit */
2366 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); 2342 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
@@ -2743,7 +2719,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2743 ms = data * 1000; 2719 ms = data * 1000;
2744 2720
2745 /* save initial values */ 2721 /* save initial values */
2746 down(&sky2->phy_sema); 2722 spin_lock_bh(&sky2->phy_lock);
2747 if (hw->chip_id == CHIP_ID_YUKON_XL) { 2723 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2748 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 2724 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2749 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 2725 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
@@ -2759,9 +2735,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2759 sky2_led(hw, port, onoff); 2735 sky2_led(hw, port, onoff);
2760 onoff = !onoff; 2736 onoff = !onoff;
2761 2737
2762 up(&sky2->phy_sema); 2738 spin_unlock_bh(&sky2->phy_lock);
2763 interrupted = msleep_interruptible(250); 2739 interrupted = msleep_interruptible(250);
2764 down(&sky2->phy_sema); 2740 spin_lock_bh(&sky2->phy_lock);
2765 2741
2766 ms -= 250; 2742 ms -= 250;
2767 } 2743 }
@@ -2776,7 +2752,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2776 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 2752 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2777 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 2753 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2778 } 2754 }
2779 up(&sky2->phy_sema); 2755 spin_unlock_bh(&sky2->phy_lock);
2780 2756
2781 return 0; 2757 return 0;
2782} 2758}
@@ -2806,38 +2782,6 @@ static int sky2_set_pauseparam(struct net_device *dev,
2806 return err; 2782 return err;
2807} 2783}
2808 2784
2809#ifdef CONFIG_PM
2810static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2811{
2812 struct sky2_port *sky2 = netdev_priv(dev);
2813
2814 wol->supported = WAKE_MAGIC;
2815 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2816}
2817
2818static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2819{
2820 struct sky2_port *sky2 = netdev_priv(dev);
2821 struct sky2_hw *hw = sky2->hw;
2822
2823 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2824 return -EOPNOTSUPP;
2825
2826 sky2->wol = wol->wolopts == WAKE_MAGIC;
2827
2828 if (sky2->wol) {
2829 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2830
2831 sky2_write16(hw, WOL_CTRL_STAT,
2832 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2833 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2834 } else
2835 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2836
2837 return 0;
2838}
2839#endif
2840
2841static int sky2_get_coalesce(struct net_device *dev, 2785static int sky2_get_coalesce(struct net_device *dev,
2842 struct ethtool_coalesce *ecmd) 2786 struct ethtool_coalesce *ecmd)
2843{ 2787{
@@ -2878,19 +2822,11 @@ static int sky2_set_coalesce(struct net_device *dev,
2878{ 2822{
2879 struct sky2_port *sky2 = netdev_priv(dev); 2823 struct sky2_port *sky2 = netdev_priv(dev);
2880 struct sky2_hw *hw = sky2->hw; 2824 struct sky2_hw *hw = sky2->hw;
2881 const u32 tmin = sky2_clk2us(hw, 1); 2825 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
2882 const u32 tmax = 5000;
2883
2884 if (ecmd->tx_coalesce_usecs != 0 &&
2885 (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax))
2886 return -EINVAL;
2887
2888 if (ecmd->rx_coalesce_usecs != 0 &&
2889 (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax))
2890 return -EINVAL;
2891 2826
2892 if (ecmd->rx_coalesce_usecs_irq != 0 && 2827 if (ecmd->tx_coalesce_usecs > tmax ||
2893 (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) 2828 ecmd->rx_coalesce_usecs > tmax ||
2829 ecmd->rx_coalesce_usecs_irq > tmax)
2894 return -EINVAL; 2830 return -EINVAL;
2895 2831
2896 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) 2832 if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
@@ -3025,10 +2961,6 @@ static struct ethtool_ops sky2_ethtool_ops = {
3025 .set_ringparam = sky2_set_ringparam, 2961 .set_ringparam = sky2_set_ringparam,
3026 .get_pauseparam = sky2_get_pauseparam, 2962 .get_pauseparam = sky2_get_pauseparam,
3027 .set_pauseparam = sky2_set_pauseparam, 2963 .set_pauseparam = sky2_set_pauseparam,
3028#ifdef CONFIG_PM
3029 .get_wol = sky2_get_wol,
3030 .set_wol = sky2_set_wol,
3031#endif
3032 .phys_id = sky2_phys_id, 2964 .phys_id = sky2_phys_id,
3033 .get_stats_count = sky2_get_stats_count, 2965 .get_stats_count = sky2_get_stats_count,
3034 .get_ethtool_stats = sky2_get_ethtool_stats, 2966 .get_ethtool_stats = sky2_get_ethtool_stats,
@@ -3082,16 +3014,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3082 sky2->speed = -1; 3014 sky2->speed = -1;
3083 sky2->advertising = sky2_supported_modes(hw); 3015 sky2->advertising = sky2_supported_modes(hw);
3084 3016
3085 /* Receive checksum disabled for Yukon XL 3017 /* Receive checksum disabled for Yukon XL
3086 * because of observed problems with incorrect 3018 * because of observed problems with incorrect
3087 * values when multiple packets are received in one interrupt 3019 * values when multiple packets are received in one interrupt
3088 */ 3020 */
3089 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); 3021 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
3090 3022
3091 INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2); 3023 spin_lock_init(&sky2->phy_lock);
3092 init_MUTEX(&sky2->phy_sema);
3093 sky2->tx_pending = TX_DEF_PENDING; 3024 sky2->tx_pending = TX_DEF_PENDING;
3094 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING; 3025 sky2->rx_pending = RX_DEF_PENDING;
3095 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN); 3026 sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
3096 3027
3097 hw->dev[port] = dev; 3028 hw->dev[port] = dev;
@@ -3133,6 +3064,66 @@ static void __devinit sky2_show_addr(struct net_device *dev)
3133 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 3064 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3134} 3065}
3135 3066
3067/* Handle software interrupt used during MSI test */
3068static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
3069 struct pt_regs *regs)
3070{
3071 struct sky2_hw *hw = dev_id;
3072 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3073
3074 if (status == 0)
3075 return IRQ_NONE;
3076
3077 if (status & Y2_IS_IRQ_SW) {
3078 hw->msi_detected = 1;
3079 wake_up(&hw->msi_wait);
3080 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3081 }
3082 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3083
3084 return IRQ_HANDLED;
3085}
3086
3087/* Test interrupt path by forcing a software IRQ */
3088static int __devinit sky2_test_msi(struct sky2_hw *hw)
3089{
3090 struct pci_dev *pdev = hw->pdev;
3091 int err;
3092
3093 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
3094
3095 err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
3096 if (err) {
3097 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3098 pci_name(pdev), pdev->irq);
3099 return err;
3100 }
3101
3102 init_waitqueue_head(&hw->msi_wait);
3103
3104 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3105 wmb();
3106
3107 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
3108
3109 if (!hw->msi_detected) {
3110 /* MSI test failed, go back to INTx mode */
3111 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
3112 "switching to INTx mode. Please report this failure to "
3113 "the PCI maintainer and include system chipset information.\n",
3114 pci_name(pdev));
3115
3116 err = -EOPNOTSUPP;
3117 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
3118 }
3119
3120 sky2_write32(hw, B0_IMSK, 0);
3121
3122 free_irq(pdev->irq, hw);
3123
3124 return err;
3125}
3126
3136static int __devinit sky2_probe(struct pci_dev *pdev, 3127static int __devinit sky2_probe(struct pci_dev *pdev,
3137 const struct pci_device_id *ent) 3128 const struct pci_device_id *ent)
3138{ 3129{
@@ -3201,7 +3192,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3201 goto err_out_free_hw; 3192 goto err_out_free_hw;
3202 } 3193 }
3203 hw->pm_cap = pm_cap; 3194 hw->pm_cap = pm_cap;
3204 spin_lock_init(&hw->hw_lock);
3205 3195
3206#ifdef __BIG_ENDIAN 3196#ifdef __BIG_ENDIAN
3207 /* byte swap descriptors in hardware */ 3197 /* byte swap descriptors in hardware */
@@ -3254,21 +3244,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3254 } 3244 }
3255 } 3245 }
3256 3246
3257 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); 3247 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3248 err = sky2_test_msi(hw);
3249 if (err == -EOPNOTSUPP)
3250 pci_disable_msi(pdev);
3251 else if (err)
3252 goto err_out_unregister;
3253 }
3254
3255 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
3258 if (err) { 3256 if (err) {
3259 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3257 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3260 pci_name(pdev), pdev->irq); 3258 pci_name(pdev), pdev->irq);
3261 goto err_out_unregister; 3259 goto err_out_unregister;
3262 } 3260 }
3263 3261
3264 hw->intr_mask = Y2_IS_BASE; 3262 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3265 sky2_write32(hw, B0_IMSK, hw->intr_mask);
3266 3263
3267 pci_set_drvdata(pdev, hw); 3264 pci_set_drvdata(pdev, hw);
3268 3265
3269 return 0; 3266 return 0;
3270 3267
3271err_out_unregister: 3268err_out_unregister:
3269 pci_disable_msi(pdev);
3272 if (dev1) { 3270 if (dev1) {
3273 unregister_netdev(dev1); 3271 unregister_netdev(dev1);
3274 free_netdev(dev1); 3272 free_netdev(dev1);
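The probe-time MSI sequence above is defensive: some chipsets advertise MSI but never deliver it, so the driver enables MSI, proves delivery with the software-IRQ self-test, and falls back to legacy INTx on failure. The control flow, restated compactly from the hunk:

	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);	/* fires CS_ST_SW_IRQ, waits HZ/10 */
		if (err == -EOPNOTSUPP)
			pci_disable_msi(pdev);	/* fall back to INTx */
		else if (err)
			goto err_out_unregister;
	}
	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);

Note the matching pci_disable_msi() calls added to the error path and to sky2_remove(), so MSI state never leaks past the driver.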
@@ -3311,6 +3309,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3311 sky2_read8(hw, B0_CTST); 3309 sky2_read8(hw, B0_CTST);
3312 3310
3313 free_irq(pdev->irq, hw); 3311 free_irq(pdev->irq, hw);
3312 pci_disable_msi(pdev);
3314 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 3313 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
3315 pci_release_regions(pdev); 3314 pci_release_regions(pdev);
3316 pci_disable_device(pdev); 3315 pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index dce955c76f3c..d63cd5a1b71c 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -278,13 +278,11 @@ enum {
278 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ 278 Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
279 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ 279 Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
280 280
281 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU | 281 Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
282 Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY | 282 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
283 Y2_IS_IRQ_SW | Y2_IS_TIMINT, 283 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
284 Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | 284 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
285 Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1, 285 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
286 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
287 Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
288}; 286};
289 287
290/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 288/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -1832,6 +1830,7 @@ struct sky2_port {
1832 struct net_device *netdev; 1830 struct net_device *netdev;
1833 unsigned port; 1831 unsigned port;
1834 u32 msg_enable; 1832 u32 msg_enable;
1833 spinlock_t phy_lock;
1835 1834
1836 spinlock_t tx_lock ____cacheline_aligned_in_smp; 1835 spinlock_t tx_lock ____cacheline_aligned_in_smp;
1837 struct tx_ring_info *tx_ring; 1836 struct tx_ring_info *tx_ring;
@@ -1840,7 +1839,6 @@ struct sky2_port {
1840 u16 tx_prod; /* next le to use */ 1839 u16 tx_prod; /* next le to use */
1841 u32 tx_addr64; 1840 u32 tx_addr64;
1842 u16 tx_pending; 1841 u16 tx_pending;
1843 u16 tx_last_put;
1844 u16 tx_last_mss; 1842 u16 tx_last_mss;
1845 1843
1846 struct ring_info *rx_ring ____cacheline_aligned_in_smp; 1844 struct ring_info *rx_ring ____cacheline_aligned_in_smp;
@@ -1849,7 +1847,6 @@ struct sky2_port {
1849 u16 rx_next; /* next re to check */ 1847 u16 rx_next; /* next re to check */
1850 u16 rx_put; /* next le index to use */ 1848 u16 rx_put; /* next le index to use */
1851 u16 rx_pending; 1849 u16 rx_pending;
1852 u16 rx_last_put;
1853 u16 rx_bufsize; 1850 u16 rx_bufsize;
1854#ifdef SKY2_VLAN_TAG_USED 1851#ifdef SKY2_VLAN_TAG_USED
1855 u16 rx_tag; 1852 u16 rx_tag;
@@ -1865,20 +1862,15 @@ struct sky2_port {
1865 u8 rx_pause; 1862 u8 rx_pause;
1866 u8 tx_pause; 1863 u8 tx_pause;
1867 u8 rx_csum; 1864 u8 rx_csum;
1868 u8 wol;
1869 1865
1870 struct net_device_stats net_stats; 1866 struct net_device_stats net_stats;
1871 1867
1872 struct work_struct phy_task;
1873 struct semaphore phy_sema;
1874}; 1868};
1875 1869
1876struct sky2_hw { 1870struct sky2_hw {
1877 void __iomem *regs; 1871 void __iomem *regs;
1878 struct pci_dev *pdev; 1872 struct pci_dev *pdev;
1879 struct net_device *dev[2]; 1873 struct net_device *dev[2];
1880 spinlock_t hw_lock;
1881 u32 intr_mask;
1882 1874
1883 int pm_cap; 1875 int pm_cap;
1884 u8 chip_id; 1876 u8 chip_id;
@@ -1889,6 +1881,8 @@ struct sky2_hw {
1889 struct sky2_status_le *st_le; 1881 struct sky2_status_le *st_le;
1890 u32 st_idx; 1882 u32 st_idx;
1891 dma_addr_t st_dma; 1883 dma_addr_t st_dma;
1884 int msi_detected;
1885 wait_queue_head_t msi_wait;
1892}; 1886};
1893 1887
1894/* Register accessor for memory mapped device */ 1888/* Register accessor for memory mapped device */
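
The msi_detected/msi_wait fields added to struct sky2_hw above carry the
handshake behind sky2_test_msi(): the test ISR sets the flag and wakes the
prober, which waits with a timeout. A sketch using the 2.6.16-era handler
signature; the handler name and the exact trigger sequence are illustrative,
with CS_CL_SW_IRQ taken from the fragment shown earlier:

static irqreturn_t example_test_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct sky2_hw *hw = dev_id;

        /* Clear the software IRQ and record that MSI delivery works */
        sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
        hw->msi_detected = 1;
        wake_up(&hw->msi_wait);
        return IRQ_HANDLED;
}

The prober side, after raising the software IRQ, would then do roughly:

        init_waitqueue_head(&hw->msi_wait);
        /* ...request_irq(), trigger the software IRQ... */
        wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
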
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 75e9b3b910cc..0e9833adf9fe 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -215,15 +215,12 @@ struct smc_local {
215 215
216 spinlock_t lock; 216 spinlock_t lock;
217 217
218#ifdef SMC_CAN_USE_DATACS
219 u32 __iomem *datacs;
220#endif
221
222#ifdef SMC_USE_PXA_DMA 218#ifdef SMC_USE_PXA_DMA
223 /* DMA needs the physical address of the chip */ 219 /* DMA needs the physical address of the chip */
224 u_long physaddr; 220 u_long physaddr;
225#endif 221#endif
226 void __iomem *base; 222 void __iomem *base;
223 void __iomem *datacs;
227}; 224};
228 225
229#if SMC_DEBUG > 0 226#if SMC_DEBUG > 0
@@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev)
2104 * Set the appropriate byte/word mode. 2101 * Set the appropriate byte/word mode.
2105 */ 2102 */
2106 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; 2103 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
2107#ifndef SMC_CAN_USE_16BIT 2104 if (!SMC_CAN_USE_16BIT)
2108 ecsr |= ECSR_IOIS8; 2105 ecsr |= ECSR_IOIS8;
2109#endif
2110 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); 2106 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
2111 local_irq_restore(flags); 2107 local_irq_restore(flags);
2112 2108
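
The #ifndef SMC_CAN_USE_16BIT block becomes a plain if (!SMC_CAN_USE_16BIT):
with the macro now a 0/1 compile-time constant, the optimizer discards the
dead branch just as cpp did, but both branches stay syntax-checked in every
configuration. A standalone C illustration of the idiom, with stand-in names:

#include <stdio.h>

#define CAN_USE_16BIT 1                 /* platform section would set this */
#define IOIS8         0x01              /* stand-in for ECSR_IOIS8 */

static unsigned set_bus_width(unsigned ecsr)
{
        if (!CAN_USE_16BIT)             /* constant-folded away when 1 */
                ecsr |= IOIS8;
        return ecsr;
}

int main(void)
{
        printf("ecsr = %#x\n", set_bus_width(0x10));
        return 0;
}
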
@@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev)
2143 release_mem_region(res->start, ATTRIB_SIZE); 2139 release_mem_region(res->start, ATTRIB_SIZE);
2144} 2140}
2145 2141
2146#ifdef SMC_CAN_USE_DATACS 2142static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2147static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2148{ 2143{
2149 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); 2144 if (SMC_CAN_USE_DATACS) {
2150 struct smc_local *lp = netdev_priv(ndev); 2145 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2146 struct smc_local *lp = netdev_priv(ndev);
2151 2147
2152 if (!res) 2148 if (!res)
2153 return; 2149 return;
2154 2150
2155 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { 2151 if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
2156 printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); 2152 printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
2157 return; 2153 return;
2158 } 2154 }
2159 2155
2160 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); 2156 lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
2157 }
2161} 2158}
2162 2159
2163static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) 2160static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
2164{ 2161{
2165 struct smc_local *lp = netdev_priv(ndev); 2162 if (SMC_CAN_USE_DATACS) {
2166 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); 2163 struct smc_local *lp = netdev_priv(ndev);
2164 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2167 2165
2168 if (lp->datacs) 2166 if (lp->datacs)
2169 iounmap(lp->datacs); 2167 iounmap(lp->datacs);
2170 2168
2171 lp->datacs = NULL; 2169 lp->datacs = NULL;
2172 2170
2173 if (res) 2171 if (res)
2174 release_mem_region(res->start, SMC_DATA_EXTENT); 2172 release_mem_region(res->start, SMC_DATA_EXTENT);
2173 }
2175} 2174}
2176#else
2177static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {}
2178static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {}
2179#endif
2180 2175
2181/* 2176/*
2182 * smc_init(void) 2177 * smc_init(void)
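
In the rewritten datacs helpers above, SMC_CAN_USE_DATACS (a constant that
smc91x.h now defaults to 0) replaces #ifdef, while the per-device lp->datacs
pointer still decides at run time whether the 32-bit window was mapped. A
sketch of the resulting two-level guard; the helper name is illustrative:

static inline void __iomem *example_data_window(struct smc_local *lp,
                                                void __iomem *ioaddr)
{
        /* Compile-time cull on platforms without the extra chip select,
         * run-time choice on those that may or may not map it. */
        if (SMC_CAN_USE_DATACS && lp->datacs)
                return lp->datacs;      /* dedicated 32-bit data window */
        return ioaddr;                  /* ordinary banked register window */
}
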
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e0efd1964e72..e1be1af51201 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
275#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l) 275#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l)
276#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; }) 276#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; })
277 277
278static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) 278#define SMC_outsw LPD7A40X_SMC_outsw
279
280static inline void LPD7A40X_SMC_outsw(unsigned long a, int r,
281 unsigned char* p, int l)
279{ 282{
280 unsigned short* ps = (unsigned short*) p; 283 unsigned short* ps = (unsigned short*) p;
281 while (l-- > 0) { 284 while (l-- > 0) {
@@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
342 345
343#endif 346#endif
344 347
345#ifndef SMC_IRQ_FLAGS
346#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
347#endif
348
349#ifdef SMC_USE_PXA_DMA 348#ifdef SMC_USE_PXA_DMA
350/* 349/*
351 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is 350 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
@@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs)
441#endif /* SMC_USE_PXA_DMA */ 440#endif /* SMC_USE_PXA_DMA */
442 441
443 442
444/* Because of bank switching, the LAN91x uses only 16 I/O ports */ 443/*
444 * Everything a particular hardware setup needs should have been defined
445 * at this point. Add stubs for the undefined cases, mainly to avoid
446 * compilation warnings since they'll be optimized away, or to prevent buggy
447 * use of them.
448 */
449
450#if ! SMC_CAN_USE_32BIT
451#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
452#define SMC_outl(x, ioaddr, reg) BUG()
453#define SMC_insl(a, r, p, l) BUG()
454#define SMC_outsl(a, r, p, l) BUG()
455#endif
456
457#if !defined(SMC_insl) || !defined(SMC_outsl)
458#define SMC_insl(a, r, p, l) BUG()
459#define SMC_outsl(a, r, p, l) BUG()
460#endif
461
462#if ! SMC_CAN_USE_16BIT
463
464/*
465 * Any 16-bit access is performed with two 8-bit accesses if the hardware
 466 * can't do it directly. Most registers are 16-bit, so those are mandatory.
467 */
468#define SMC_outw(x, ioaddr, reg) \
469 do { \
470 unsigned int __val16 = (x); \
471 SMC_outb( __val16, ioaddr, reg ); \
472 SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
473 } while (0)
474#define SMC_inw(ioaddr, reg) \
475 ({ \
476 unsigned int __val16; \
477 __val16 = SMC_inb( ioaddr, reg ); \
478 __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
479 __val16; \
480 })
481
482#define SMC_insw(a, r, p, l) BUG()
483#define SMC_outsw(a, r, p, l) BUG()
484
485#endif
486
487#if !defined(SMC_insw) || !defined(SMC_outsw)
488#define SMC_insw(a, r, p, l) BUG()
489#define SMC_outsw(a, r, p, l) BUG()
490#endif
491
492#if ! SMC_CAN_USE_8BIT
493#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
494#define SMC_outb(x, ioaddr, reg) BUG()
495#define SMC_insb(a, r, p, l) BUG()
496#define SMC_outsb(a, r, p, l) BUG()
497#endif
498
499#if !defined(SMC_insb) || !defined(SMC_outsb)
500#define SMC_insb(a, r, p, l) BUG()
501#define SMC_outsb(a, r, p, l) BUG()
502#endif
503
504#ifndef SMC_CAN_USE_DATACS
505#define SMC_CAN_USE_DATACS 0
506#endif
507
445#ifndef SMC_IO_SHIFT 508#ifndef SMC_IO_SHIFT
446#define SMC_IO_SHIFT 0 509#define SMC_IO_SHIFT 0
447#endif 510#endif
511
512#ifndef SMC_IRQ_FLAGS
513#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
514#endif
515
516#ifndef SMC_INTERRUPT_PREAMBLE
517#define SMC_INTERRUPT_PREAMBLE
518#endif
519
520
521/* Because of bank switching, the LAN91x uses only 16 I/O ports */
448#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) 522#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
449#define SMC_DATA_EXTENT (4) 523#define SMC_DATA_EXTENT (4)
450 524
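
The stubs above let common code reference every access width without
conditional compilation: the SMC_CAN_USE_* macros are 0/1 constants, so a
guarded call compiles everywhere and the BUG() body lands in a branch the
optimizer deletes. A sketch of a caller, using the driver's accessor macros;
the function name is illustrative:

static void example_put_word(void __iomem *ioaddr, u32 val)
{
        if (SMC_CAN_USE_32BIT)
                SMC_outl(val, ioaddr, DATA_REG);
        else
                SMC_outw(val & 0xffff, ioaddr, DATA_REG);
        /* With SMC_CAN_USE_32BIT == 0, SMC_outl() expands to BUG() but
         * sits in an unreachable branch that is discarded; it exists
         * only so this function compiles on every platform. */
}
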
@@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = {
817 * Note: the following macros do *not* select the bank -- this must 891 * Note: the following macros do *not* select the bank -- this must
818 * be done separately as needed in the main code. The SMC_REG() macro 892 * be done separately as needed in the main code. The SMC_REG() macro
819 * only uses the bank argument for debugging purposes (when enabled). 893 * only uses the bank argument for debugging purposes (when enabled).
894 *
895 * Note: despite inline functions being safer, everything leading to this
896 * should preferably be macros to let BUG() display the line number in
 897 * the core source code, since we're interested in the top call site,
898 * not in any inline function location.
820 */ 899 */
821 900
822#if SMC_DEBUG > 0 901#if SMC_DEBUG > 0
@@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = {
834#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) 913#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT)
835#endif 914#endif
836 915
837#if SMC_CAN_USE_8BIT 916/*
838#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG ) 917 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
839#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG ) 918 * aligned to a 32 bit boundary. I tell you that does exist!
840#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG ) 919 * Fortunately the affected register accesses can be easily worked around
 841#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG ) 920 * since we can write zeroes to the preceding 16 bits without adverse
842#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG ) 921 * effects and use a 32-bit access.
843#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG ) 922 *
844#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG ) 923 * Enforce it on any 32-bit capable setup for now.
845#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG ) 924 */
846#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG ) 925#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT
847#else 926
848#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF) 927#define SMC_GET_PN() \
849#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG ) 928 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \
850#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8) 929 : (SMC_inw(ioaddr, PN_REG) & 0xFF) )
851#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF) 930
852#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8) 931#define SMC_SET_PN(x) \
853#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF) 932 do { \
933 if (SMC_MUST_ALIGN_WRITE) \
934 SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \
935 else if (SMC_CAN_USE_8BIT) \
936 SMC_outb(x, ioaddr, PN_REG); \
937 else \
938 SMC_outw(x, ioaddr, PN_REG); \
939 } while (0)
940
941#define SMC_GET_AR() \
942 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \
943 : (SMC_inw(ioaddr, PN_REG) >> 8) )
944
945#define SMC_GET_TXFIFO() \
946 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \
947 : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) )
948
949#define SMC_GET_RXFIFO() \
950 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \
951 : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) )
952
953#define SMC_GET_INT() \
954 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \
955 : (SMC_inw(ioaddr, INT_REG) & 0xFF) )
956
854#define SMC_ACK_INT(x) \ 957#define SMC_ACK_INT(x) \
855 do { \ 958 do { \
856 unsigned long __flags; \ 959 if (SMC_CAN_USE_8BIT) \
857 int __mask; \ 960 SMC_outb(x, ioaddr, INT_REG); \
858 local_irq_save(__flags); \ 961 else { \
859 __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ 962 unsigned long __flags; \
860 SMC_outw( __mask | (x), ioaddr, INT_REG ); \ 963 int __mask; \
861 local_irq_restore(__flags); \ 964 local_irq_save(__flags); \
965 __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
966 SMC_outw( __mask | (x), ioaddr, INT_REG ); \
967 local_irq_restore(__flags); \
968 } \
969 } while (0)
970
971#define SMC_GET_INT_MASK() \
972 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \
973 : (SMC_inw( ioaddr, INT_REG ) >> 8) )
974
975#define SMC_SET_INT_MASK(x) \
976 do { \
977 if (SMC_CAN_USE_8BIT) \
978 SMC_outb(x, ioaddr, IM_REG); \
979 else \
980 SMC_outw((x) << 8, ioaddr, INT_REG); \
981 } while (0)
982
983#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT)
984
985#define SMC_SELECT_BANK(x) \
986 do { \
987 if (SMC_MUST_ALIGN_WRITE) \
988 SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
989 else \
990 SMC_outw(x, ioaddr, BANK_SELECT); \
991 } while (0)
992
993#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG)
994
995#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG)
996
997#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG)
998
999#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG)
1000
1001#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG)
1002
1003#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG)
1004
1005#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG)
1006
1007#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG)
1008
1009#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG)
1010
1011#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG)
1012
1013#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG)
1014
1015#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG)
1016
1017#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG)
1018
1019#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG)
1020
1021#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG)
1022
1023#define SMC_SET_PTR(x) \
1024 do { \
1025 if (SMC_MUST_ALIGN_WRITE) \
1026 SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \
1027 else \
1028 SMC_outw(x, ioaddr, PTR_REG); \
862 } while (0) 1029 } while (0)
863#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8)
864#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG )
865#endif
866 1030
867#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT ) 1031#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG)
868#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT ) 1032
869#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG ) 1033#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG)
870#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG ) 1034
871#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG ) 1035#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG)
872#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG ) 1036
873#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG ) 1037#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG)
874#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG ) 1038
875#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG ) 1039#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG)
876#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG ) 1040
877#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG ) 1041#define SMC_SET_RPC(x) \
878#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG ) 1042 do { \
879#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG ) 1043 if (SMC_MUST_ALIGN_WRITE) \
880#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG ) 1044 SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \
881#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG ) 1045 else \
882#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) 1046 SMC_outw(x, ioaddr, RPC_REG); \
883#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) 1047 } while (0)
884#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) 1048
885#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG ) 1049#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG)
886#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) 1050
887#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) 1051#define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG)
888#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
889#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG )
890#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG )
891#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG )
892#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG )
893 1052
894#ifndef SMC_GET_MAC_ADDR 1053#ifndef SMC_GET_MAC_ADDR
895#define SMC_GET_MAC_ADDR(addr) \ 1054#define SMC_GET_MAC_ADDR(addr) \
@@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = {
920 SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ 1079 SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \
921 } while (0) 1080 } while (0)
922 1081
923#if SMC_CAN_USE_32BIT
924/*
925 * Some setups just can't write 8 or 16 bits reliably when not aligned
926 * to a 32 bit boundary. I tell you that exists!
927 * We re-do the ones here that can be easily worked around if they can have
928 * their low parts written to 0 without adverse effects.
929 */
930#undef SMC_SELECT_BANK
931#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT )
932#undef SMC_SET_RPC
933#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) )
934#undef SMC_SET_PN
935#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) )
936#undef SMC_SET_PTR
937#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) )
938#endif
939
940#if SMC_CAN_USE_32BIT
941#define SMC_PUT_PKT_HDR(status, length) \
942 SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG )
943#define SMC_GET_PKT_HDR(status, length) \
944 do { \
945 unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \
946 (status) = __val & 0xffff; \
947 (length) = __val >> 16; \
948 } while (0)
949#else
950#define SMC_PUT_PKT_HDR(status, length) \ 1082#define SMC_PUT_PKT_HDR(status, length) \
951 do { \ 1083 do { \
952 SMC_outw( status, ioaddr, DATA_REG ); \ 1084 if (SMC_CAN_USE_32BIT) \
953 SMC_outw( length, ioaddr, DATA_REG ); \ 1085 SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \
954 } while (0) 1086 else { \
955#define SMC_GET_PKT_HDR(status, length) \ 1087 SMC_outw(status, ioaddr, DATA_REG); \
956 do { \ 1088 SMC_outw(length, ioaddr, DATA_REG); \
957 (status) = SMC_inw( ioaddr, DATA_REG ); \ 1089 } \
958 (length) = SMC_inw( ioaddr, DATA_REG ); \
959 } while (0) 1090 } while (0)
960#endif
961 1091
962#if SMC_CAN_USE_32BIT 1092#define SMC_GET_PKT_HDR(status, length) \
963#define _SMC_PUSH_DATA(p, l) \
964 do { \ 1093 do { \
965 char *__ptr = (p); \ 1094 if (SMC_CAN_USE_32BIT) { \
966 int __len = (l); \ 1095 unsigned int __val = SMC_inl(ioaddr, DATA_REG); \
967 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1096 (status) = __val & 0xffff; \
968 __len -= 2; \ 1097 (length) = __val >> 16; \
969 SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \ 1098 } else { \
970 __ptr += 2; \ 1099 (status) = SMC_inw(ioaddr, DATA_REG); \
971 } \ 1100 (length) = SMC_inw(ioaddr, DATA_REG); \
972 SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \
973 if (__len & 2) { \
974 __ptr += (__len & ~3); \
975 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
976 } \ 1101 } \
977 } while (0) 1102 } while (0)
978#define _SMC_PULL_DATA(p, l) \
979 do { \
980 char *__ptr = (p); \
981 int __len = (l); \
982 if ((unsigned long)__ptr & 2) { \
983 /* \
984 * We want 32bit alignment here. \
985 * Since some buses perform a full 32bit \
986 * fetch even for 16bit data we can't use \
987 * SMC_inw() here. Back both source (on chip \
988 * and destination) pointers of 2 bytes. \
989 */ \
990 __ptr -= 2; \
991 __len += 2; \
992 SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
993 } \
994 __len += 2; \
995 SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \
996 } while (0)
997#elif SMC_CAN_USE_16BIT
998#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 )
999#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 )
1000#elif SMC_CAN_USE_8BIT
1001#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l )
1002#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l )
1003#endif
1004 1103
1005#if ! SMC_CAN_USE_16BIT 1104#define SMC_PUSH_DATA(p, l) \
1006#define SMC_outw(x, ioaddr, reg) \
1007 do { \ 1105 do { \
1008 unsigned int __val16 = (x); \ 1106 if (SMC_CAN_USE_32BIT) { \
1009 SMC_outb( __val16, ioaddr, reg ); \ 1107 void *__ptr = (p); \
1010 SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ 1108 int __len = (l); \
1109 void *__ioaddr = ioaddr; \
1110 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1111 __len -= 2; \
1112 SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
1113 __ptr += 2; \
1114 } \
1115 if (SMC_CAN_USE_DATACS && lp->datacs) \
1116 __ioaddr = lp->datacs; \
1117 SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \
1118 if (__len & 2) { \
1119 __ptr += (__len & ~3); \
1120 SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \
1121 } \
1122 } else if (SMC_CAN_USE_16BIT) \
1123 SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \
1124 else if (SMC_CAN_USE_8BIT) \
1125 SMC_outsb(ioaddr, DATA_REG, p, l); \
1011 } while (0) 1126 } while (0)
1012#define SMC_inw(ioaddr, reg) \
1013 ({ \
1014 unsigned int __val16; \
1015 __val16 = SMC_inb( ioaddr, reg ); \
1016 __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
1017 __val16; \
1018 })
1019#endif
1020
1021#ifdef SMC_CAN_USE_DATACS
1022#define SMC_PUSH_DATA(p, l) \
1023 if ( lp->datacs ) { \
1024 unsigned char *__ptr = (p); \
1025 int __len = (l); \
1026 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1027 __len -= 2; \
1028 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
1029 __ptr += 2; \
1030 } \
1031 outsl(lp->datacs, __ptr, __len >> 2); \
1032 if (__len & 2) { \
1033 __ptr += (__len & ~3); \
1034 SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
1035 } \
1036 } else { \
1037 _SMC_PUSH_DATA(p, l); \
1038 }
1039 1127
1040#define SMC_PULL_DATA(p, l) \ 1128#define SMC_PULL_DATA(p, l) \
1041 if ( lp->datacs ) { \ 1129 do { \
1042 unsigned char *__ptr = (p); \ 1130 if (SMC_CAN_USE_32BIT) { \
1043 int __len = (l); \ 1131 void *__ptr = (p); \
1044 if ((unsigned long)__ptr & 2) { \ 1132 int __len = (l); \
1045 /* \ 1133 void *__ioaddr = ioaddr; \
1046 * We want 32bit alignment here. \ 1134 if ((unsigned long)__ptr & 2) { \
1047 * Since some buses perform a full 32bit \ 1135 /* \
1048 * fetch even for 16bit data we can't use \ 1136 * We want 32bit alignment here. \
1049 * SMC_inw() here. Back both source (on chip \ 1137 * Since some buses perform a full \
1050 * and destination) pointers of 2 bytes. \ 1138 * 32bit fetch even for 16bit data \
1051 */ \ 1139 * we can't use SMC_inw() here. \
1052 __ptr -= 2; \ 1140 * Back both source (on-chip) and \
 1141 * destination pointers by 2 bytes. \
 1142 * This is possible since the call to \
 1143 * SMC_GET_PKT_HDR() already advanced \
 1144 * the source pointer by 4 bytes, and \
 1145 * the skb_reserve(skb, 2) advanced \
 1146 * the destination pointer by 2 bytes. \
1147 */ \
1148 __ptr -= 2; \
1149 __len += 2; \
1150 SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
1151 } \
1152 if (SMC_CAN_USE_DATACS && lp->datacs) \
1153 __ioaddr = lp->datacs; \
1053 __len += 2; \ 1154 __len += 2; \
1054 SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ 1155 SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \
1055 } \ 1156 } else if (SMC_CAN_USE_16BIT) \
1056 __len += 2; \ 1157 SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \
1057 insl( lp->datacs, __ptr, __len >> 2); \ 1158 else if (SMC_CAN_USE_8BIT) \
1058 } else { \ 1159 SMC_insb(ioaddr, DATA_REG, p, l); \
1059 _SMC_PULL_DATA(p, l); \ 1160 } while (0)
1060 }
1061#else
1062#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l)
1063#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l)
1064#endif
1065
1066#if !defined (SMC_INTERRUPT_PREAMBLE)
1067# define SMC_INTERRUPT_PREAMBLE
1068#endif
1069 1161
1070#endif /* _SMC91X_H_ */ 1162#endif /* _SMC91X_H_ */
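
Every SMC_MUST_ALIGN_WRITE case above uses the same trick: a 16-bit register
sitting in the high half of an aligned 32-bit word is written with one 32-bit
store whose low half is zero, which is safe whenever zeroing the preceding
16 bits has no adverse effect, as the comment above notes. A generic sketch
of the arithmetic, assuming a little-endian bus mapping; names are stand-ins:

#include <stdint.h>

static inline void write16_high_half(volatile void *base, unsigned reg_off,
                                     uint16_t val)
{
        /* reg_off must be 2 mod 4: the target register occupies the high
         * 16 bits of the aligned word, and the register in the low 16
         * bits must tolerate being overwritten with zero. */
        volatile uint32_t *word =
                (volatile uint32_t *)((uintptr_t)base + (reg_off & ~3u));
        *word = (uint32_t)val << 16;    /* low half written as zero */
}
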
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e03d1ae50c3e..88829eb9568e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.52" 72#define DRV_MODULE_VERSION "3.53"
73#define DRV_MODULE_RELDATE "Mar 06, 2006" 73#define DRV_MODULE_RELDATE "Mar 22, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -1148,6 +1148,19 @@ static int tg3_halt_cpu(struct tg3 *, u32);
1148static int tg3_nvram_lock(struct tg3 *); 1148static int tg3_nvram_lock(struct tg3 *);
1149static void tg3_nvram_unlock(struct tg3 *); 1149static void tg3_nvram_unlock(struct tg3 *);
1150 1150
1151static void tg3_power_down_phy(struct tg3 *tp)
1152{
1153 /* The PHY should not be powered down on some chips because
1154 * of bugs.
1155 */
1156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1158 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1159 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1160 return;
1161 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1162}
1163
1151static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 1164static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1152{ 1165{
1153 u32 misc_host_ctrl; 1166 u32 misc_host_ctrl;
@@ -1327,8 +1340,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1327 tg3_writephy(tp, MII_TG3_EXT_CTRL, 1340 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1328 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 1341 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1329 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); 1342 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1330 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) 1343 tg3_power_down_phy(tp);
1331 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1332 } 1344 }
1333 } 1345 }
1334 1346
@@ -9436,12 +9448,18 @@ static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9436 return NULL; 9448 return NULL;
9437} 9449}
9438 9450
9439/* Since this function may be called in D3-hot power state during
9440 * tg3_init_one(), only config cycles are allowed.
9441 */
9442static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) 9451static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9443{ 9452{
9444 u32 val; 9453 u32 val;
9454 u16 pmcsr;
9455
9456 /* On some early chips the SRAM cannot be accessed in D3hot state,
 9457 * so we need to make sure we're in D0.
9458 */
9459 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9460 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9461 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9462 msleep(1);
9445 9463
9446 /* Make sure register accesses (indirect or otherwise) 9464 /* Make sure register accesses (indirect or otherwise)
9447 * will function correctly. 9465 * will function correctly.
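
The PMCSR fragment above forces the device to D0 before its SRAM is read.
The same three config cycles apply to any PCI function with a power
management capability; a minimal sketch, where pm_cap is the capability
offset (e.g. from pci_find_capability(pdev, PCI_CAP_ID_PM)) and the helper
name is illustrative:

static void example_force_d0(struct pci_dev *pdev, int pm_cap)
{
        u16 pmcsr;

        pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;       /* 00b selects D0 */
        pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcsr);
        msleep(1);                              /* let the transition settle */
}
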