Diffstat (limited to 'drivers/net/pcnet32.c')
-rw-r--r--  drivers/net/pcnet32.c | 4155
1 file changed, 2156 insertions(+), 1999 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 1bc3f5bffb95..861361018640 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -26,8 +26,8 @@
26 | #define DRV_RELDATE "18.Mar.2006" | 26 | #define DRV_RELDATE "18.Mar.2006" |
27 | #define PFX DRV_NAME ": " | 27 | #define PFX DRV_NAME ": " |
28 | 28 | ||
29 | static const char * const version = | 29 | static const char *const version = |
30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; | 30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; |
31 | 31 | ||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
58 | * PCI device identifiers for "new style" Linux PCI Device Drivers | 58 | * PCI device identifiers for "new style" Linux PCI Device Drivers |
59 | */ | 59 | */ |
60 | static struct pci_device_id pcnet32_pci_tbl[] = { | 60 | static struct pci_device_id pcnet32_pci_tbl[] = { |
61 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 61 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, |
62 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 62 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
63 | /* | 63 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, |
64 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have | 64 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
65 | * the incorrect vendor id. | 65 | |
66 | */ | 66 | /* |
67 | { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, | 67 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have |
68 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 }, | 68 | * the incorrect vendor id. |
69 | { 0, } | 69 | */ |
| | 70 | { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, |
| | 71 | PCI_ANY_ID, PCI_ANY_ID, |
| | 72 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0}, |
| | 73 | |
| | 74 | { } /* terminate list */ |
70 | }; | 75 | }; |
71 | 76 | ||
72 | MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl); | 77 | MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); |
73 | 78 | ||
74 | static int cards_found; | 79 | static int cards_found; |
75 | 80 | ||
@@ -77,13 +82,11 @@ static int cards_found;
77 | * VLB I/O addresses | 82 | * VLB I/O addresses |
78 | */ | 83 | */ |
79 | static unsigned int pcnet32_portlist[] __initdata = | 84 | static unsigned int pcnet32_portlist[] __initdata = |
80 | { 0x300, 0x320, 0x340, 0x360, 0 }; | 85 | { 0x300, 0x320, 0x340, 0x360, 0 }; |
81 | | | |
82 | | | |
83 | 86 | ||
84 | static int pcnet32_debug = 0; | 87 | static int pcnet32_debug = 0; |
85 | static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ | 88 | static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ |
86 | static int pcnet32vlb; /* check for VLB cards ? */ | 89 | static int pcnet32vlb; /* check for VLB cards ? */ |
87 | 90 | ||
88 | static struct net_device *pcnet32_dev; | 91 | static struct net_device *pcnet32_dev; |
89 | 92 | ||
@@ -110,32 +113,34 @@ static int rx_copybreak = 200;
110 | * to internal options | 113 | * to internal options |
111 | */ | 114 | */ |
112 | static const unsigned char options_mapping[] = { | 115 | static const unsigned char options_mapping[] = { |
113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ | 116 | PCNET32_PORT_ASEL, /* 0 Auto-select */ |
114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ | 117 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ |
115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ | 118 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ |
116 | PCNET32_PORT_ASEL, /* 3 not supported */ | 119 | PCNET32_PORT_ASEL, /* 3 not supported */ |
117 | PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ | 120 | PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ |
118 | PCNET32_PORT_ASEL, /* 5 not supported */ | 121 | PCNET32_PORT_ASEL, /* 5 not supported */ |
119 | PCNET32_PORT_ASEL, /* 6 not supported */ | 122 | PCNET32_PORT_ASEL, /* 6 not supported */ |
120 | PCNET32_PORT_ASEL, /* 7 not supported */ | 123 | PCNET32_PORT_ASEL, /* 7 not supported */ |
121 | PCNET32_PORT_ASEL, /* 8 not supported */ | 124 | PCNET32_PORT_ASEL, /* 8 not supported */ |
122 | PCNET32_PORT_MII, /* 9 MII 10baseT */ | 125 | PCNET32_PORT_MII, /* 9 MII 10baseT */ |
123 | PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ | 126 | PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ |
124 | PCNET32_PORT_MII, /* 11 MII (autosel) */ | 127 | PCNET32_PORT_MII, /* 11 MII (autosel) */ |
125 | PCNET32_PORT_10BT, /* 12 10BaseT */ | 128 | PCNET32_PORT_10BT, /* 12 10BaseT */ |
126 | PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ | 129 | PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ |
127 | PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */ | 130 | /* 14 MII 100BaseTx-FD */ |
128 | PCNET32_PORT_ASEL /* 15 not supported */ | 131 | PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, |
| | 132 | PCNET32_PORT_ASEL /* 15 not supported */ |
129 | }; | 133 | }; |
130 | 134 | ||
131 | static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { | 135 | static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { |
132 | "Loopback test (offline)" | 136 | "Loopback test (offline)" |
133 | }; | 137 | }; |
| | 138 | |
134 | #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) | 139 | #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) |
135 | 140 | ||
136 | #define PCNET32_NUM_REGS 136 | 141 | #define PCNET32_NUM_REGS 136 |
137 | 142 | ||
138 | #define MAX_UNITS 8 /* More are supported, limit only on options */ | 143 | #define MAX_UNITS 8 /* More are supported, limit only on options */ |
139 | static int options[MAX_UNITS]; | 144 | static int options[MAX_UNITS]; |
140 | static int full_duplex[MAX_UNITS]; | 145 | static int full_duplex[MAX_UNITS]; |
141 | static int homepna[MAX_UNITS]; | 146 | static int homepna[MAX_UNITS]; |
@@ -270,7 +275,6 @@ static int homepna[MAX_UNITS];
270 | * Philippe Seewer assisted with auto negotiation and testing. | 275 | * Philippe Seewer assisted with auto negotiation and testing. |
271 | */ | 276 | */ |
272 | 277 | ||
273 | | | |
274 | /* | 278 | /* |
275 | * Set the number of Tx and Rx buffers, using Log_2(# buffers). | 279 | * Set the number of Tx and Rx buffers, using Log_2(# buffers). |
276 | * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. | 280 | * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. |
@@ -306,42 +310,42 @@ static int homepna[MAX_UNITS];
306 | 310 | ||
307 | /* The PCNET32 Rx and Tx ring descriptors. */ | 311 | /* The PCNET32 Rx and Tx ring descriptors. */ |
308 | struct pcnet32_rx_head { | 312 | struct pcnet32_rx_head { |
309 | u32 base; | 313 | u32 base; |
310 | s16 buf_length; | 314 | s16 buf_length; |
311 | s16 status; | 315 | s16 status; |
312 | u32 msg_length; | 316 | u32 msg_length; |
313 | u32 reserved; | 317 | u32 reserved; |
314 | }; | 318 | }; |
315 | 319 | ||
316 | struct pcnet32_tx_head { | 320 | struct pcnet32_tx_head { |
317 | u32 base; | 321 | u32 base; |
318 | s16 length; | 322 | s16 length; |
319 | s16 status; | 323 | s16 status; |
320 | u32 misc; | 324 | u32 misc; |
321 | u32 reserved; | 325 | u32 reserved; |
322 | }; | 326 | }; |
323 | 327 | ||
324 | /* The PCNET32 32-Bit initialization block, described in databook. */ | 328 | /* The PCNET32 32-Bit initialization block, described in databook. */ |
325 | struct pcnet32_init_block { | 329 | struct pcnet32_init_block { |
326 | u16 mode; | 330 | u16 mode; |
327 | u16 tlen_rlen; | 331 | u16 tlen_rlen; |
328 | u8 phys_addr[6]; | 332 | u8 phys_addr[6]; |
329 | u16 reserved; | 333 | u16 reserved; |
330 | u32 filter[2]; | 334 | u32 filter[2]; |
331 | /* Receive and transmit ring base, along with extra bits. */ | 335 | /* Receive and transmit ring base, along with extra bits. */ |
332 | u32 rx_ring; | 336 | u32 rx_ring; |
333 | u32 tx_ring; | 337 | u32 tx_ring; |
334 | }; | 338 | }; |
335 | 339 | ||
336 | /* PCnet32 access functions */ | 340 | /* PCnet32 access functions */ |
337 | struct pcnet32_access { | 341 | struct pcnet32_access { |
338 | u16 (*read_csr)(unsigned long, int); | 342 | u16 (*read_csr) (unsigned long, int); |
339 | void (*write_csr)(unsigned long, int, u16); | 343 | void (*write_csr) (unsigned long, int, u16); |
340 | u16 (*read_bcr)(unsigned long, int); | 344 | u16 (*read_bcr) (unsigned long, int); |
341 | void (*write_bcr)(unsigned long, int, u16); | 345 | void (*write_bcr) (unsigned long, int, u16); |
342 | u16 (*read_rap)(unsigned long); | 346 | u16 (*read_rap) (unsigned long); |
343 | void (*write_rap)(unsigned long, u16); | 347 | void (*write_rap) (unsigned long, u16); |
344 | void (*reset)(unsigned long); | 348 | void (*reset) (unsigned long); |
345 | }; | 349 | }; |
346 | 350 | ||
347 | /* | 351 | /* |
@@ -349,771 +353,794 @@ struct pcnet32_access {
349 | * so the structure should be allocated using pci_alloc_consistent(). | 353 | * so the structure should be allocated using pci_alloc_consistent(). |
350 | */ | 354 | */ |
351 | struct pcnet32_private { | 355 | struct pcnet32_private { |
352 | struct pcnet32_init_block init_block; | 356 | struct pcnet32_init_block init_block; |
353 | /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ | 357 | /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ |
354 | struct pcnet32_rx_head *rx_ring; | 358 | struct pcnet32_rx_head *rx_ring; |
355 | struct pcnet32_tx_head *tx_ring; | 359 | struct pcnet32_tx_head *tx_ring; |
356 | dma_addr_t dma_addr; /* DMA address of beginning of this | 360 | dma_addr_t dma_addr; /* DMA address of beginning of this |
357 | object, returned by | 361 | object, returned by |
358 | pci_alloc_consistent */ | 362 | pci_alloc_consistent */ |
359 | struct pci_dev *pci_dev; /* Pointer to the associated pci device | 363 | struct pci_dev *pci_dev; /* Pointer to the associated pci device |
360 | structure */ | 364 | structure */ |
361 | const char *name; | 365 | const char *name; |
362 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 366 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
363 | struct sk_buff **tx_skbuff; | 367 | struct sk_buff **tx_skbuff; |
364 | struct sk_buff **rx_skbuff; | 368 | struct sk_buff **rx_skbuff; |
365 | dma_addr_t *tx_dma_addr; | 369 | dma_addr_t *tx_dma_addr; |
366 | dma_addr_t *rx_dma_addr; | 370 | dma_addr_t *rx_dma_addr; |
367 | struct pcnet32_access a; | 371 | struct pcnet32_access a; |
368 | spinlock_t lock; /* Guard lock */ | 372 | spinlock_t lock; /* Guard lock */ |
369 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ | 373 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ |
370 | unsigned int rx_ring_size; /* current rx ring size */ | 374 | unsigned int rx_ring_size; /* current rx ring size */ |
371 | unsigned int tx_ring_size; /* current tx ring size */ | 375 | unsigned int tx_ring_size; /* current tx ring size */ |
372 | unsigned int rx_mod_mask; /* rx ring modular mask */ | 376 | unsigned int rx_mod_mask; /* rx ring modular mask */ |
373 | unsigned int tx_mod_mask; /* tx ring modular mask */ | 377 | unsigned int tx_mod_mask; /* tx ring modular mask */ |
374 | unsigned short rx_len_bits; | 378 | unsigned short rx_len_bits; |
375 | unsigned short tx_len_bits; | 379 | unsigned short tx_len_bits; |
376 | dma_addr_t rx_ring_dma_addr; | 380 | dma_addr_t rx_ring_dma_addr; |
377 | dma_addr_t tx_ring_dma_addr; | 381 | dma_addr_t tx_ring_dma_addr; |
378 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ | 382 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ |
379 | struct net_device_stats stats; | 383 | struct net_device_stats stats; |
380 | char tx_full; | 384 | char tx_full; |
381 | char phycount; /* number of phys found */ | 385 | char phycount; /* number of phys found */ |
382 | int options; | 386 | int options; |
383 | unsigned int shared_irq:1, /* shared irq possible */ | 387 | unsigned int shared_irq:1, /* shared irq possible */ |
384 | dxsuflo:1, /* disable transmit stop on uflo */ | 388 | dxsuflo:1, /* disable transmit stop on uflo */ |
385 | mii:1; /* mii port available */ | 389 | mii:1; /* mii port available */ |
386 | struct net_device *next; | 390 | struct net_device *next; |
387 | struct mii_if_info mii_if; | 391 | struct mii_if_info mii_if; |
388 | struct timer_list watchdog_timer; | 392 | struct timer_list watchdog_timer; |
389 | struct timer_list blink_timer; | 393 | struct timer_list blink_timer; |
390 | u32 msg_enable; /* debug message level */ | 394 | u32 msg_enable; /* debug message level */ |
391 | 395 | ||
392 | /* each bit indicates an available PHY */ | 396 | /* each bit indicates an available PHY */ |
393 | u32 phymask; | 397 | u32 phymask; |
394 | }; | 398 | }; |
395 | 399 | ||
396 | static void pcnet32_probe_vlbus(void); | 400 | static void pcnet32_probe_vlbus(void); |
397 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); | 401 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); |
398 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); | 402 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); |
399 | static int pcnet32_open(struct net_device *); | 403 | static int pcnet32_open(struct net_device *); |
400 | static int pcnet32_init_ring(struct net_device *); | 404 | static int pcnet32_init_ring(struct net_device *); |
401 | static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); | 405 | static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); |
402 | static int pcnet32_rx(struct net_device *); | 406 | static int pcnet32_rx(struct net_device *); |
403 | static void pcnet32_tx_timeout (struct net_device *dev); | 407 | static void pcnet32_tx_timeout(struct net_device *dev); |
404 | static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); | 408 | static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); |
405 | static int pcnet32_close(struct net_device *); | 409 | static int pcnet32_close(struct net_device *); |
406 | static struct net_device_stats *pcnet32_get_stats(struct net_device *); | 410 | static struct net_device_stats *pcnet32_get_stats(struct net_device *); |
407 | static void pcnet32_load_multicast(struct net_device *dev); | 411 | static void pcnet32_load_multicast(struct net_device *dev); |
408 | static void pcnet32_set_multicast_list(struct net_device *); | 412 | static void pcnet32_set_multicast_list(struct net_device *); |
409 | static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); | 413 | static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); |
410 | static void pcnet32_watchdog(struct net_device *); | 414 | static void pcnet32_watchdog(struct net_device *); |
411 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num); | 415 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num); |
412 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); | 416 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, |
| | 417 | int val); |
413 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); | 418 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); |
414 | static void pcnet32_ethtool_test(struct net_device *dev, | 419 | static void pcnet32_ethtool_test(struct net_device *dev, |
415 | struct ethtool_test *eth_test, u64 *data); | 420 | struct ethtool_test *eth_test, u64 * data); |
416 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1); | 421 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); |
417 | static int pcnet32_phys_id(struct net_device *dev, u32 data); | 422 | static int pcnet32_phys_id(struct net_device *dev, u32 data); |
418 | static void pcnet32_led_blink_callback(struct net_device *dev); | 423 | static void pcnet32_led_blink_callback(struct net_device *dev); |
419 | static int pcnet32_get_regs_len(struct net_device *dev); | 424 | static int pcnet32_get_regs_len(struct net_device *dev); |
420 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 425 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
421 | void *ptr); | 426 | void *ptr); |
422 | static void pcnet32_purge_tx_ring(struct net_device *dev); | 427 | static void pcnet32_purge_tx_ring(struct net_device *dev); |
423 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); | 428 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); |
424 | static void pcnet32_free_ring(struct net_device *dev); | 429 | static void pcnet32_free_ring(struct net_device *dev); |
425 | static void pcnet32_check_media(struct net_device *dev, int verbose); | 430 | static void pcnet32_check_media(struct net_device *dev, int verbose); |
426 | 431 | ||
427 | | | |
428 | enum pci_flags_bit { | 432 | enum pci_flags_bit { |
429 | PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, | 433 | PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4, |
430 | PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, | 434 | PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 = |
| | 435 | 0x10 << 2, PCI_ADDR3 = 0x10 << 3, |
431 | }; | 436 | }; |
432 | 437 | ||
433 | | 438 | static u16 pcnet32_wio_read_csr(unsigned long addr, int index) |
434 | static u16 pcnet32_wio_read_csr (unsigned long addr, int index) | | |
435 | { | 439 | { |
436 | outw (index, addr+PCNET32_WIO_RAP); | 440 | outw(index, addr + PCNET32_WIO_RAP); |
437 | return inw (addr+PCNET32_WIO_RDP); | 441 | return inw(addr + PCNET32_WIO_RDP); |
438 | } | 442 | } |
439 | 443 | ||
440 | static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val) | 444 | static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) |
441 | { | 445 | { |
442 | outw (index, addr+PCNET32_WIO_RAP); | 446 | outw(index, addr + PCNET32_WIO_RAP); |
443 | outw (val, addr+PCNET32_WIO_RDP); | 447 | outw(val, addr + PCNET32_WIO_RDP); |
444 | } | 448 | } |
445 | 449 | ||
446 | static u16 pcnet32_wio_read_bcr (unsigned long addr, int index) | 450 | static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) |
447 | { | 451 | { |
448 | outw (index, addr+PCNET32_WIO_RAP); | 452 | outw(index, addr + PCNET32_WIO_RAP); |
449 | return inw (addr+PCNET32_WIO_BDP); | 453 | return inw(addr + PCNET32_WIO_BDP); |
450 | } | 454 | } |
451 | 455 | ||
452 | static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val) | 456 | static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) |
453 | { | 457 | { |
454 | outw (index, addr+PCNET32_WIO_RAP); | 458 | outw(index, addr + PCNET32_WIO_RAP); |
455 | outw (val, addr+PCNET32_WIO_BDP); | 459 | outw(val, addr + PCNET32_WIO_BDP); |
456 | } | 460 | } |
457 | 461 | ||
458 | static u16 pcnet32_wio_read_rap (unsigned long addr) | 462 | static u16 pcnet32_wio_read_rap(unsigned long addr) |
459 | { | 463 | { |
460 | return inw (addr+PCNET32_WIO_RAP); | 464 | return inw(addr + PCNET32_WIO_RAP); |
461 | } | 465 | } |
462 | 466 | ||
463 | static void pcnet32_wio_write_rap (unsigned long addr, u16 val) | 467 | static void pcnet32_wio_write_rap(unsigned long addr, u16 val) |
464 | { | 468 | { |
465 | outw (val, addr+PCNET32_WIO_RAP); | 469 | outw(val, addr + PCNET32_WIO_RAP); |
466 | } | 470 | } |
467 | 471 | ||
468 | static void pcnet32_wio_reset (unsigned long addr) | 472 | static void pcnet32_wio_reset(unsigned long addr) |
469 | { | 473 | { |
470 | inw (addr+PCNET32_WIO_RESET); | 474 | inw(addr + PCNET32_WIO_RESET); |
471 | } | 475 | } |
472 | 476 | ||
473 | static int pcnet32_wio_check (unsigned long addr) | 477 | static int pcnet32_wio_check(unsigned long addr) |
474 | { | 478 | { |
475 | outw (88, addr+PCNET32_WIO_RAP); | 479 | outw(88, addr + PCNET32_WIO_RAP); |
476 | return (inw (addr+PCNET32_WIO_RAP) == 88); | 480 | return (inw(addr + PCNET32_WIO_RAP) == 88); |
477 | } | 481 | } |
478 | 482 | ||
479 | static struct pcnet32_access pcnet32_wio = { | 483 | static struct pcnet32_access pcnet32_wio = { |
480 | .read_csr = pcnet32_wio_read_csr, | 484 | .read_csr = pcnet32_wio_read_csr, |
481 | .write_csr = pcnet32_wio_write_csr, | 485 | .write_csr = pcnet32_wio_write_csr, |
482 | .read_bcr = pcnet32_wio_read_bcr, | 486 | .read_bcr = pcnet32_wio_read_bcr, |
483 | .write_bcr = pcnet32_wio_write_bcr, | 487 | .write_bcr = pcnet32_wio_write_bcr, |
484 | .read_rap = pcnet32_wio_read_rap, | 488 | .read_rap = pcnet32_wio_read_rap, |
485 | .write_rap = pcnet32_wio_write_rap, | 489 | .write_rap = pcnet32_wio_write_rap, |
486 | .reset = pcnet32_wio_reset | 490 | .reset = pcnet32_wio_reset |
487 | }; | 491 | }; |
488 | 492 | ||
489 | static u16 pcnet32_dwio_read_csr (unsigned long addr, int index) | 493 | static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) |
490 | { | 494 | { |
491 | outl (index, addr+PCNET32_DWIO_RAP); | 495 | outl(index, addr + PCNET32_DWIO_RAP); |
492 | return (inl (addr+PCNET32_DWIO_RDP) & 0xffff); | 496 | return (inl(addr + PCNET32_DWIO_RDP) & 0xffff); |
493 | } | 497 | } |
494 | 498 | ||
495 | static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val) | 499 | static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) |
496 | { | 500 | { |
497 | outl (index, addr+PCNET32_DWIO_RAP); | 501 | outl(index, addr + PCNET32_DWIO_RAP); |
498 | outl (val, addr+PCNET32_DWIO_RDP); | 502 | outl(val, addr + PCNET32_DWIO_RDP); |
499 | } | 503 | } |
500 | 504 | ||
501 | static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index) | 505 | static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) |
502 | { | 506 | { |
503 | outl (index, addr+PCNET32_DWIO_RAP); | 507 | outl(index, addr + PCNET32_DWIO_RAP); |
504 | return (inl (addr+PCNET32_DWIO_BDP) & 0xffff); | 508 | return (inl(addr + PCNET32_DWIO_BDP) & 0xffff); |
505 | } | 509 | } |
506 | 510 | ||
507 | static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val) | 511 | static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) |
508 | { | 512 | { |
509 | outl (index, addr+PCNET32_DWIO_RAP); | 513 | outl(index, addr + PCNET32_DWIO_RAP); |
510 | outl (val, addr+PCNET32_DWIO_BDP); | 514 | outl(val, addr + PCNET32_DWIO_BDP); |
511 | } | 515 | } |
512 | 516 | ||
513 | static u16 pcnet32_dwio_read_rap (unsigned long addr) | 517 | static u16 pcnet32_dwio_read_rap(unsigned long addr) |
514 | { | 518 | { |
515 | return (inl (addr+PCNET32_DWIO_RAP) & 0xffff); | 519 | return (inl(addr + PCNET32_DWIO_RAP) & 0xffff); |
516 | } | 520 | } |
517 | 521 | ||
518 | static void pcnet32_dwio_write_rap (unsigned long addr, u16 val) | 522 | static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) |
519 | { | 523 | { |
520 | outl (val, addr+PCNET32_DWIO_RAP); | 524 | outl(val, addr + PCNET32_DWIO_RAP); |
521 | } | 525 | } |
522 | 526 | ||
523 | static void pcnet32_dwio_reset (unsigned long addr) | 527 | static void pcnet32_dwio_reset(unsigned long addr) |
524 | { | 528 | { |
525 | inl (addr+PCNET32_DWIO_RESET); | 529 | inl(addr + PCNET32_DWIO_RESET); |
526 | } | 530 | } |
527 | 531 | ||
528 | static int pcnet32_dwio_check (unsigned long addr) | 532 | static int pcnet32_dwio_check(unsigned long addr) |
529 | { | 533 | { |
530 | outl (88, addr+PCNET32_DWIO_RAP); | 534 | outl(88, addr + PCNET32_DWIO_RAP); |
531 | return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88); | 535 | return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); |
532 | } | 536 | } |
533 | 537 | ||
534 | static struct pcnet32_access pcnet32_dwio = { | 538 | static struct pcnet32_access pcnet32_dwio = { |
535 | .read_csr = pcnet32_dwio_read_csr, | 539 | .read_csr = pcnet32_dwio_read_csr, |
536 | .write_csr = pcnet32_dwio_write_csr, | 540 | .write_csr = pcnet32_dwio_write_csr, |
537 | .read_bcr = pcnet32_dwio_read_bcr, | 541 | .read_bcr = pcnet32_dwio_read_bcr, |
538 | .write_bcr = pcnet32_dwio_write_bcr, | 542 | .write_bcr = pcnet32_dwio_write_bcr, |
539 | .read_rap = pcnet32_dwio_read_rap, | 543 | .read_rap = pcnet32_dwio_read_rap, |
540 | .write_rap = pcnet32_dwio_write_rap, | 544 | .write_rap = pcnet32_dwio_write_rap, |
541 | .reset = pcnet32_dwio_reset | 545 | .reset = pcnet32_dwio_reset |
542 | }; | 546 | }; |
543 | 547 | ||
544 | #ifdef CONFIG_NET_POLL_CONTROLLER | 548 | #ifdef CONFIG_NET_POLL_CONTROLLER |
545 | static void pcnet32_poll_controller(struct net_device *dev) | 549 | static void pcnet32_poll_controller(struct net_device *dev) |
546 | { | 550 | { |
547 | disable_irq(dev->irq); | 551 | disable_irq(dev->irq); |
548 | pcnet32_interrupt(0, dev, NULL); | 552 | pcnet32_interrupt(0, dev, NULL); |
549 | enable_irq(dev->irq); | 553 | enable_irq(dev->irq); |
550 | } | 554 | } |
551 | #endif | 555 | #endif |
552 | 556 | ||
552 | | 556 | |
553 | | | |
555 | { | 558 | { |
556 | struct pcnet32_private *lp = dev->priv; | 559 | struct pcnet32_private *lp = dev->priv; |
557 | unsigned long flags; | 560 | unsigned long flags; |
558 | int r = -EOPNOTSUPP; | 561 | int r = -EOPNOTSUPP; |
559 | 562 | ||
560 | if (lp->mii) { | 563 | if (lp->mii) { |
561 | spin_lock_irqsave(&lp->lock, flags); | 564 | spin_lock_irqsave(&lp->lock, flags); |
562 | mii_ethtool_gset(&lp->mii_if, cmd); | 565 | mii_ethtool_gset(&lp->mii_if, cmd); |
563 | spin_unlock_irqrestore(&lp->lock, flags); | 566 | spin_unlock_irqrestore(&lp->lock, flags); |
564 | r = 0; | 567 | r = 0; |
565 | } | 568 | } |
566 | return r; | 569 | return r; |
567 | } | 570 | } |
568 | 571 | ||
569 | static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 572 | static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
570 | { | 573 | { |
571 | struct pcnet32_private *lp = dev->priv; | 574 | struct pcnet32_private *lp = dev->priv; |
572 | unsigned long flags; | 575 | unsigned long flags; |
573 | int r = -EOPNOTSUPP; | 576 | int r = -EOPNOTSUPP; |
574 | 577 | ||
575 | if (lp->mii) { | 578 | if (lp->mii) { |
576 | spin_lock_irqsave(&lp->lock, flags); | 579 | spin_lock_irqsave(&lp->lock, flags); |
577 | r = mii_ethtool_sset(&lp->mii_if, cmd); | 580 | r = mii_ethtool_sset(&lp->mii_if, cmd); |
578 | spin_unlock_irqrestore(&lp->lock, flags); | 581 | spin_unlock_irqrestore(&lp->lock, flags); |
579 | } | 582 | } |
580 | return r; | 583 | return r; |
581 | } | 584 | } |
582 | 585 | ||
583 | static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 586 | static void pcnet32_get_drvinfo(struct net_device *dev, |
| | 587 | struct ethtool_drvinfo *info) |
584 | { | 588 | { |
585 | struct pcnet32_private *lp = dev->priv; | 589 | struct pcnet32_private *lp = dev->priv; |
586 | 590 | ||
587 | strcpy (info->driver, DRV_NAME); | 591 | strcpy(info->driver, DRV_NAME); |
588 | strcpy (info->version, DRV_VERSION); | 592 | strcpy(info->version, DRV_VERSION); |
589 | if (lp->pci_dev) | 593 | if (lp->pci_dev) |
590 | strcpy (info->bus_info, pci_name(lp->pci_dev)); | 594 | strcpy(info->bus_info, pci_name(lp->pci_dev)); |
591 | else | 595 | else |
592 | sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); | 596 | sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); |
593 | } | 597 | } |
594 | 598 | ||
595 | static u32 pcnet32_get_link(struct net_device *dev) | 599 | static u32 pcnet32_get_link(struct net_device *dev) |
596 | { | 600 | { |
597 | struct pcnet32_private *lp = dev->priv; | 601 | struct pcnet32_private *lp = dev->priv; |
598 | unsigned long flags; | 602 | unsigned long flags; |
599 | int r; | 603 | int r; |
600 | | 604 | |
601 | spin_lock_irqsave(&lp->lock, flags); | 605 | spin_lock_irqsave(&lp->lock, flags); |
602 | if (lp->mii) { | 606 | if (lp->mii) { |
603 | r = mii_link_ok(&lp->mii_if); | 607 | r = mii_link_ok(&lp->mii_if); |
604 | } else { | 608 | } else { |
605 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | 609 | ulong ioaddr = dev->base_addr; /* card base I/O address */ |
606 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | 610 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); |
607 | } | 611 | } |
608 | spin_unlock_irqrestore(&lp->lock, flags); | 612 | spin_unlock_irqrestore(&lp->lock, flags); |
609 | | 613 | |
610 | return r; | 614 | return r; |
611 | } | 615 | } |
612 | 616 | ||
613 | static u32 pcnet32_get_msglevel(struct net_device *dev) | 617 | static u32 pcnet32_get_msglevel(struct net_device *dev) |
614 | { | 618 | { |
615 | struct pcnet32_private *lp = dev->priv; | 619 | struct pcnet32_private *lp = dev->priv; |
616 | return lp->msg_enable; | 620 | return lp->msg_enable; |
617 | } | 621 | } |
618 | 622 | ||
619 | static void pcnet32_set_msglevel(struct net_device *dev, u32 value) | 623 | static void pcnet32_set_msglevel(struct net_device *dev, u32 value) |
620 | { | 624 | { |
621 | struct pcnet32_private *lp = dev->priv; | 625 | struct pcnet32_private *lp = dev->priv; |
622 | lp->msg_enable = value; | 626 | lp->msg_enable = value; |
623 | } | 627 | } |
624 | 628 | ||
625 | static int pcnet32_nway_reset(struct net_device *dev) | 629 | static int pcnet32_nway_reset(struct net_device *dev) |
626 | { | 630 | { |
627 | struct pcnet32_private *lp = dev->priv; | 631 | struct pcnet32_private *lp = dev->priv; |
628 | unsigned long flags; | 632 | unsigned long flags; |
629 | int r = -EOPNOTSUPP; | 633 | int r = -EOPNOTSUPP; |
630 | 634 | ||
631 | if (lp->mii) { | 635 | if (lp->mii) { |
632 | spin_lock_irqsave(&lp->lock, flags); | 636 | spin_lock_irqsave(&lp->lock, flags); |
633 | r = mii_nway_restart(&lp->mii_if); | 637 | r = mii_nway_restart(&lp->mii_if); |
634 | spin_unlock_irqrestore(&lp->lock, flags); | 638 | spin_unlock_irqrestore(&lp->lock, flags); |
635 | } | 639 | } |
636 | return r; | 640 | return r; |
637 | } | 641 | } |
638 | 642 | ||
639 | static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 643 | static void pcnet32_get_ringparam(struct net_device *dev, |
| | 644 | struct ethtool_ringparam *ering) |
640 | { | 645 | { |
641 | struct pcnet32_private *lp = dev->priv; | 646 | struct pcnet32_private *lp = dev->priv; |
642 | 647 | ||
643 | ering->tx_max_pending = TX_MAX_RING_SIZE - 1; | 648 | ering->tx_max_pending = TX_MAX_RING_SIZE - 1; |
644 | ering->tx_pending = lp->tx_ring_size - 1; | 649 | ering->tx_pending = lp->tx_ring_size - 1; |
645 | ering->rx_max_pending = RX_MAX_RING_SIZE - 1; | 650 | ering->rx_max_pending = RX_MAX_RING_SIZE - 1; |
646 | ering->rx_pending = lp->rx_ring_size - 1; | 651 | ering->rx_pending = lp->rx_ring_size - 1; |
647 | } | 652 | } |
648 | 653 | ||
649 | static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 654 | static int pcnet32_set_ringparam(struct net_device *dev, |
| | 655 | struct ethtool_ringparam *ering) |
650 | { | 656 | { |
651 | struct pcnet32_private *lp = dev->priv; | 657 | struct pcnet32_private *lp = dev->priv; |
652 | unsigned long flags; | 658 | unsigned long flags; |
653 | int i; | 659 | int i; |
654 | 660 | ||
655 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) | 661 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) |
656 | return -EINVAL; | 662 | return -EINVAL; |
657 | 663 | ||
658 | if (netif_running(dev)) | 664 | if (netif_running(dev)) |
659 | pcnet32_close(dev); | 665 | pcnet32_close(dev); |
660 | 666 | ||
661 | spin_lock_irqsave(&lp->lock, flags); | 667 | spin_lock_irqsave(&lp->lock, flags); |
662 | pcnet32_free_ring(dev); | ||
663 | lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE); | ||
664 | lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE); | ||
665 | |||
666 | /* set the minimum ring size to 4, to allow the loopback test to work | ||
667 | * unchanged. | ||
668 | */ | ||
669 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { | ||
670 | if (lp->tx_ring_size <= (1 << i)) | ||
671 | break; | ||
672 | } | ||
673 | lp->tx_ring_size = (1 << i); | ||
674 | lp->tx_mod_mask = lp->tx_ring_size - 1; | ||
675 | lp->tx_len_bits = (i << 12); | ||
676 | |||
677 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { | ||
678 | if (lp->rx_ring_size <= (1 << i)) | ||
679 | break; | ||
680 | } | ||
681 | lp->rx_ring_size = (1 << i); | ||
682 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
683 | lp->rx_len_bits = (i << 4); | ||
684 | |||
685 | if (pcnet32_alloc_ring(dev, dev->name)) { | ||
686 | pcnet32_free_ring(dev); | 668 | pcnet32_free_ring(dev); |
687 | spin_unlock_irqrestore(&lp->lock, flags); | 669 | lp->tx_ring_size = |
688 | return -ENOMEM; | 670 | min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); |
689 | } | 671 | lp->rx_ring_size = |
672 | min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); | ||
673 | |||
674 | /* set the minimum ring size to 4, to allow the loopback test to work | ||
675 | * unchanged. | ||
676 | */ | ||
677 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { | ||
678 | if (lp->tx_ring_size <= (1 << i)) | ||
679 | break; | ||
680 | } | ||
681 | lp->tx_ring_size = (1 << i); | ||
682 | lp->tx_mod_mask = lp->tx_ring_size - 1; | ||
683 | lp->tx_len_bits = (i << 12); | ||
690 | 684 | ||
691 | spin_unlock_irqrestore(&lp->lock, flags); | 685 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { |
686 | if (lp->rx_ring_size <= (1 << i)) | ||
687 | break; | ||
688 | } | ||
689 | lp->rx_ring_size = (1 << i); | ||
690 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
691 | lp->rx_len_bits = (i << 4); | ||
692 | |||
693 | if (pcnet32_alloc_ring(dev, dev->name)) { | ||
694 | pcnet32_free_ring(dev); | ||
695 | spin_unlock_irqrestore(&lp->lock, flags); | ||
696 | return -ENOMEM; | ||
697 | } | ||
692 | 698 | ||
693 | if (pcnet32_debug & NETIF_MSG_DRV) | 699 | spin_unlock_irqrestore(&lp->lock, flags); |
694 | printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n", | ||
695 | dev->name, lp->rx_ring_size, lp->tx_ring_size); | ||
696 | 700 | ||
697 | if (netif_running(dev)) | 701 | if (pcnet32_debug & NETIF_MSG_DRV) |
698 | pcnet32_open(dev); | 702 | printk(KERN_INFO PFX |
703 | "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, | ||
704 | lp->rx_ring_size, lp->tx_ring_size); | ||
699 | 705 | ||
700 | return 0; | 706 | if (netif_running(dev)) |
707 | pcnet32_open(dev); | ||
708 | |||
709 | return 0; | ||
701 | } | 710 | } |
702 | 711 | ||
703 | static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) | 712 | static void pcnet32_get_strings(struct net_device *dev, u32 stringset, |
| | 713 | u8 * data) |
704 | { | 714 | { |
705 | memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); | 715 | memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); |
706 | } | 716 | } |
707 | 717 | ||
708 | static int pcnet32_self_test_count(struct net_device *dev) | 718 | static int pcnet32_self_test_count(struct net_device *dev) |
709 | { | 719 | { |
710 | return PCNET32_TEST_LEN; | 720 | return PCNET32_TEST_LEN; |
711 | } | 721 | } |
712 | 722 | ||
713 | static void pcnet32_ethtool_test(struct net_device *dev, | 723 | static void pcnet32_ethtool_test(struct net_device *dev, |
714 | struct ethtool_test *test, u64 *data) | 724 | struct ethtool_test *test, u64 * data) |
715 | { | 725 | { |
716 | struct pcnet32_private *lp = dev->priv; | 726 | struct pcnet32_private *lp = dev->priv; |
717 | int rc; | 727 | int rc; |
718 | 728 | ||
719 | if (test->flags == ETH_TEST_FL_OFFLINE) { | 729 | if (test->flags == ETH_TEST_FL_OFFLINE) { |
720 | rc = pcnet32_loopback_test(dev, data); | 730 | rc = pcnet32_loopback_test(dev, data); |
721 | if (rc) { | 731 | if (rc) { |
722 | if (netif_msg_hw(lp)) | 732 | if (netif_msg_hw(lp)) |
723 | printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name); | 733 | printk(KERN_DEBUG "%s: Loopback test failed.\n", |
724 | test->flags |= ETH_TEST_FL_FAILED; | 734 | dev->name); |
| | 735 | test->flags |= ETH_TEST_FL_FAILED; |
| | 736 | } else if (netif_msg_hw(lp)) |
| | 737 | printk(KERN_DEBUG "%s: Loopback test passed.\n", |
| | 738 | dev->name); |
725 | } else if (netif_msg_hw(lp)) | 739 | } else if (netif_msg_hw(lp)) |
726 | printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name); | 740 | printk(KERN_DEBUG |
727 | } else if (netif_msg_hw(lp)) | 741 | "%s: No tests to run (specify 'Offline' on ethtool).", |
728 | printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name); | 742 | dev->name); |
729 | } /* end pcnet32_ethtool_test */ | 743 | } /* end pcnet32_ethtool_test */ |
730 | 744 | ||
731 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) | 745 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) |
732 | { | 746 | { |
733 | struct pcnet32_private *lp = dev->priv; | 747 | struct pcnet32_private *lp = dev->priv; |
734 | struct pcnet32_access *a = &lp->a; /* access to registers */ | 748 | struct pcnet32_access *a = &lp->a; /* access to registers */ |
735 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | 749 | ulong ioaddr = dev->base_addr; /* card base I/O address */ |
736 | struct sk_buff *skb; /* sk buff */ | 750 | struct sk_buff *skb; /* sk buff */ |
737 | int x, i; /* counters */ | 751 | int x, i; /* counters */ |
738 | int numbuffs = 4; /* number of TX/RX buffers and descs */ | 752 | int numbuffs = 4; /* number of TX/RX buffers and descs */ |
739 | u16 status = 0x8300; /* TX ring status */ | 753 | u16 status = 0x8300; /* TX ring status */ |
740 | u16 teststatus; /* test of ring status */ | 754 | u16 teststatus; /* test of ring status */ |
741 | int rc; /* return code */ | 755 | int rc; /* return code */ |
742 | int size; /* size of packets */ | 756 | int size; /* size of packets */ |
743 | unsigned char *packet; /* source packet data */ | 757 | unsigned char *packet; /* source packet data */ |
744 | static const int data_len = 60; /* length of source packets */ | 758 | static const int data_len = 60; /* length of source packets */ |
745 | unsigned long flags; | 759 | unsigned long flags; |
746 | unsigned long ticks; | 760 | unsigned long ticks; |
747 | 761 | ||
748 | *data1 = 1; /* status of test, default to fail */ | 762 | *data1 = 1; /* status of test, default to fail */ |
749 | rc = 1; /* default to fail */ | 763 | rc = 1; /* default to fail */ |
750 | 764 | ||
751 | if (netif_running(dev)) | 765 | if (netif_running(dev)) |
752 | pcnet32_close(dev); | 766 | pcnet32_close(dev); |
753 | 767 | ||
754 | spin_lock_irqsave(&lp->lock, flags); | 768 | spin_lock_irqsave(&lp->lock, flags); |
755 | 769 | ||
756 | /* Reset the PCNET32 */ | 770 | /* Reset the PCNET32 */ |
757 | lp->a.reset (ioaddr); | 771 | lp->a.reset(ioaddr); |
758 | 772 | ||
759 | /* switch pcnet32 to 32bit mode */ | 773 | /* switch pcnet32 to 32bit mode */ |
760 | lp->a.write_bcr (ioaddr, 20, 2); | 774 | lp->a.write_bcr(ioaddr, 20, 2); |
761 | 775 | ||
762 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 776 | lp->init_block.mode = |
763 | lp->init_block.filter[0] = 0; | 777 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
764 | lp->init_block.filter[1] = 0; | 778 | lp->init_block.filter[0] = 0; |
765 | 779 | lp->init_block.filter[1] = 0; | |
766 | /* purge & init rings but don't actually restart */ | 780 | |
767 | pcnet32_restart(dev, 0x0000); | 781 | /* purge & init rings but don't actually restart */ |
768 | 782 | pcnet32_restart(dev, 0x0000); | |
769 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | 783 | |
770 | 784 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | |
771 | /* Initialize Transmit buffers. */ | 785 | |
772 | size = data_len + 15; | 786 | /* Initialize Transmit buffers. */ |
773 | for (x=0; x<numbuffs; x++) { | 787 | size = data_len + 15; |
774 | if (!(skb = dev_alloc_skb(size))) { | 788 | for (x = 0; x < numbuffs; x++) { |
775 | if (netif_msg_hw(lp)) | 789 | if (!(skb = dev_alloc_skb(size))) { |
776 | printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n", | 790 | if (netif_msg_hw(lp)) |
777 | dev->name, __LINE__); | 791 | printk(KERN_DEBUG |
778 | goto clean_up; | 792 | "%s: Cannot allocate skb at line: %d!\n", |
779 | } else { | 793 | dev->name, __LINE__); |
780 | packet = skb->data; | 794 | goto clean_up; |
781 | skb_put(skb, size); /* create space for data */ | 795 | } else { |
782 | lp->tx_skbuff[x] = skb; | 796 | packet = skb->data; |
783 | lp->tx_ring[x].length = le16_to_cpu(-skb->len); | 797 | skb_put(skb, size); /* create space for data */ |
784 | lp->tx_ring[x].misc = 0; | 798 | lp->tx_skbuff[x] = skb; |
785 | 799 | lp->tx_ring[x].length = le16_to_cpu(-skb->len); | |
786 | /* put DA and SA into the skb */ | 800 | lp->tx_ring[x].misc = 0; |
787 | for (i=0; i<6; i++) | 801 | |
788 | *packet++ = dev->dev_addr[i]; | 802 | /* put DA and SA into the skb */ |
789 | for (i=0; i<6; i++) | 803 | for (i = 0; i < 6; i++) |
790 | *packet++ = dev->dev_addr[i]; | 804 | *packet++ = dev->dev_addr[i]; |
791 | /* type */ | 805 | for (i = 0; i < 6; i++) |
792 | *packet++ = 0x08; | 806 | *packet++ = dev->dev_addr[i]; |
793 | *packet++ = 0x06; | 807 | /* type */ |
794 | /* packet number */ | 808 | *packet++ = 0x08; |
795 | *packet++ = x; | 809 | *packet++ = 0x06; |
796 | /* fill packet with data */ | 810 | /* packet number */ |
797 | for (i=0; i<data_len; i++) | 811 | *packet++ = x; |
798 | *packet++ = i; | 812 | /* fill packet with data */ |
799 | 813 | for (i = 0; i < data_len; i++) | |
800 | lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, | 814 | *packet++ = i; |
801 | skb->len, PCI_DMA_TODEVICE); | 815 | |
802 | lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]); | 816 | lp->tx_dma_addr[x] = |
803 | wmb(); /* Make sure owner changes after all others are visible */ | 817 | pci_map_single(lp->pci_dev, skb->data, skb->len, |
804 | lp->tx_ring[x].status = le16_to_cpu(status); | 818 | PCI_DMA_TODEVICE); |
805 | } | 819 | lp->tx_ring[x].base = |
806 | } | 820 | (u32) le32_to_cpu(lp->tx_dma_addr[x]); |
807 | 821 | wmb(); /* Make sure owner changes after all others are visible */ | |
808 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ | 822 | lp->tx_ring[x].status = le16_to_cpu(status); |
809 | x = x | 0x0002; | 823 | } |
810 | a->write_bcr(ioaddr, 32, x); | ||
811 | |||
812 | lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ | ||
813 | |||
814 | teststatus = le16_to_cpu(0x8000); | ||
815 | lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ | ||
816 | |||
817 | /* Check status of descriptors */ | ||
818 | for (x=0; x<numbuffs; x++) { | ||
819 | ticks = 0; | ||
820 | rmb(); | ||
821 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { | ||
822 | spin_unlock_irqrestore(&lp->lock, flags); | ||
823 | mdelay(1); | ||
824 | spin_lock_irqsave(&lp->lock, flags); | ||
825 | rmb(); | ||
826 | ticks++; | ||
827 | } | ||
828 | if (ticks == 200) { | ||
829 | if (netif_msg_hw(lp)) | ||
830 | printk("%s: Desc %d failed to reset!\n",dev->name,x); | ||
831 | break; | ||
832 | } | ||
833 | } | ||
834 | |||
835 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | ||
836 | wmb(); | ||
837 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { | ||
838 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); | ||
839 | |||
840 | for (x=0; x<numbuffs; x++) { | ||
841 | printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); | ||
842 | skb = lp->rx_skbuff[x]; | ||
843 | for (i=0; i<size; i++) { | ||
844 | printk("%02x ", *(skb->data+i)); | ||
845 | } | ||
846 | printk("\n"); | ||
847 | } | ||
848 | } | ||
849 | |||
850 | x = 0; | ||
851 | rc = 0; | ||
852 | while (x<numbuffs && !rc) { | ||
853 | skb = lp->rx_skbuff[x]; | ||
854 | packet = lp->tx_skbuff[x]->data; | ||
855 | for (i=0; i<size; i++) { | ||
856 | if (*(skb->data+i) != packet[i]) { | ||
857 | if (netif_msg_hw(lp)) | ||
858 | printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n", | ||
859 | dev->name, i, *(skb->data+i), packet[i]); | ||
860 | rc = 1; | ||
861 | break; | ||
862 | } | ||
863 | } | 824 | } |
864 | x++; | ||
865 | } | ||
866 | if (!rc) { | ||
867 | *data1 = 0; | ||
868 | } | ||
869 | 825 | ||
870 | clean_up: | 826 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ |
871 | pcnet32_purge_tx_ring(dev); | 827 | x = x | 0x0002; |
872 | x = a->read_csr(ioaddr, 15) & 0xFFFF; | 828 | a->write_bcr(ioaddr, 32, x); |
873 | a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ | 829 | |
830 | lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ | ||
831 | |||
832 | teststatus = le16_to_cpu(0x8000); | ||
833 | lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ | ||
834 | |||
835 | /* Check status of descriptors */ | ||
836 | for (x = 0; x < numbuffs; x++) { | ||
837 | ticks = 0; | ||
838 | rmb(); | ||
839 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { | ||
840 | spin_unlock_irqrestore(&lp->lock, flags); | ||
841 | mdelay(1); | ||
842 | spin_lock_irqsave(&lp->lock, flags); | ||
843 | rmb(); | ||
844 | ticks++; | ||
845 | } | ||
846 | if (ticks == 200) { | ||
847 | if (netif_msg_hw(lp)) | ||
848 | printk("%s: Desc %d failed to reset!\n", | ||
849 | dev->name, x); | ||
850 | break; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | ||
855 | wmb(); | ||
856 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { | ||
857 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); | ||
858 | |||
859 | for (x = 0; x < numbuffs; x++) { | ||
860 | printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); | ||
861 | skb = lp->rx_skbuff[x]; | ||
862 | for (i = 0; i < size; i++) { | ||
863 | printk("%02x ", *(skb->data + i)); | ||
864 | } | ||
865 | printk("\n"); | ||
866 | } | ||
867 | } | ||
874 | 868 | ||
875 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ | 869 | x = 0; |
876 | x = x & ~0x0002; | 870 | rc = 0; |
877 | a->write_bcr(ioaddr, 32, x); | 871 | while (x < numbuffs && !rc) { |
872 | skb = lp->rx_skbuff[x]; | ||
873 | packet = lp->tx_skbuff[x]->data; | ||
874 | for (i = 0; i < size; i++) { | ||
875 | if (*(skb->data + i) != packet[i]) { | ||
876 | if (netif_msg_hw(lp)) | ||
877 | printk(KERN_DEBUG | ||
878 | "%s: Error in compare! %2x - %02x %02x\n", | ||
879 | dev->name, i, *(skb->data + i), | ||
880 | packet[i]); | ||
881 | rc = 1; | ||
882 | break; | ||
883 | } | ||
884 | } | ||
885 | x++; | ||
886 | } | ||
887 | if (!rc) { | ||
888 | *data1 = 0; | ||
889 | } | ||
878 | 890 | ||
879 | spin_unlock_irqrestore(&lp->lock, flags); | 891 | clean_up: |
892 | pcnet32_purge_tx_ring(dev); | ||
893 | x = a->read_csr(ioaddr, 15) & 0xFFFF; | ||
894 | a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ | ||
880 | 895 | ||
881 | if (netif_running(dev)) { | 896 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ |
882 | pcnet32_open(dev); | 897 | x = x & ~0x0002; |
883 | } else { | 898 | a->write_bcr(ioaddr, 32, x); |
884 | lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */ | ||
885 | } | ||
886 | 899 | ||
887 | return(rc); | 900 | spin_unlock_irqrestore(&lp->lock, flags); |
888 | } /* end pcnet32_loopback_test */ | 901 | |
902 | if (netif_running(dev)) { | ||
903 | pcnet32_open(dev); | ||
904 | } else { | ||
905 | lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ | ||
906 | } | ||
907 | |||
908 | return (rc); | ||
909 | } /* end pcnet32_loopback_test */ | ||
889 | 910 | ||
890 | static void pcnet32_led_blink_callback(struct net_device *dev) | 911 | static void pcnet32_led_blink_callback(struct net_device *dev) |
891 | { | 912 | { |
892 | struct pcnet32_private *lp = dev->priv; | 913 | struct pcnet32_private *lp = dev->priv; |
893 | struct pcnet32_access *a = &lp->a; | 914 | struct pcnet32_access *a = &lp->a; |
894 | ulong ioaddr = dev->base_addr; | 915 | ulong ioaddr = dev->base_addr; |
895 | unsigned long flags; | 916 | unsigned long flags; |
896 | int i; | 917 | int i; |
897 | 918 | ||
898 | spin_lock_irqsave(&lp->lock, flags); | 919 | spin_lock_irqsave(&lp->lock, flags); |
899 | for (i=4; i<8; i++) { | 920 | for (i = 4; i < 8; i++) { |
900 | a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); | 921 | a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); |
901 | } | 922 | } |
902 | spin_unlock_irqrestore(&lp->lock, flags); | 923 | spin_unlock_irqrestore(&lp->lock, flags); |
903 | 924 | ||
904 | mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); | 925 | mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); |
905 | } | 926 | } |
906 | 927 | ||
907 | static int pcnet32_phys_id(struct net_device *dev, u32 data) | 928 | static int pcnet32_phys_id(struct net_device *dev, u32 data) |
908 | { | 929 | { |
909 | struct pcnet32_private *lp = dev->priv; | 930 | struct pcnet32_private *lp = dev->priv; |
910 | struct pcnet32_access *a = &lp->a; | 931 | struct pcnet32_access *a = &lp->a; |
911 | ulong ioaddr = dev->base_addr; | 932 | ulong ioaddr = dev->base_addr; |
912 | unsigned long flags; | 933 | unsigned long flags; |
913 | int i, regs[4]; | 934 | int i, regs[4]; |
914 | 935 | ||
915 | if (!lp->blink_timer.function) { | 936 | if (!lp->blink_timer.function) { |
916 | init_timer(&lp->blink_timer); | 937 | init_timer(&lp->blink_timer); |
917 | lp->blink_timer.function = (void *) pcnet32_led_blink_callback; | 938 | lp->blink_timer.function = (void *)pcnet32_led_blink_callback; |
918 | lp->blink_timer.data = (unsigned long) dev; | 939 | lp->blink_timer.data = (unsigned long)dev; |
919 | } | 940 | } |
920 | 941 | ||
921 | /* Save the current value of the bcrs */ | 942 | /* Save the current value of the bcrs */ |
922 | spin_lock_irqsave(&lp->lock, flags); | 943 | spin_lock_irqsave(&lp->lock, flags); |
923 | for (i=4; i<8; i++) { | 944 | for (i = 4; i < 8; i++) { |
924 | regs[i-4] = a->read_bcr(ioaddr, i); | 945 | regs[i - 4] = a->read_bcr(ioaddr, i); |
925 | } | 946 | } |
926 | spin_unlock_irqrestore(&lp->lock, flags); | 947 | spin_unlock_irqrestore(&lp->lock, flags); |
927 | 948 | ||
928 | mod_timer(&lp->blink_timer, jiffies); | 949 | mod_timer(&lp->blink_timer, jiffies); |
929 | set_current_state(TASK_INTERRUPTIBLE); | 950 | set_current_state(TASK_INTERRUPTIBLE); |
930 | 951 | ||
931 | if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))) | 952 | if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))) |
932 | data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); | 953 | data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ); |
933 | 954 | ||
934 | msleep_interruptible(data * 1000); | 955 | msleep_interruptible(data * 1000); |
935 | del_timer_sync(&lp->blink_timer); | 956 | del_timer_sync(&lp->blink_timer); |
936 | 957 | ||
937 | /* Restore the original value of the bcrs */ | 958 | /* Restore the original value of the bcrs */ |
938 | spin_lock_irqsave(&lp->lock, flags); | 959 | spin_lock_irqsave(&lp->lock, flags); |
939 | for (i=4; i<8; i++) { | 960 | for (i = 4; i < 8; i++) { |
940 | a->write_bcr(ioaddr, i, regs[i-4]); | 961 | a->write_bcr(ioaddr, i, regs[i - 4]); |
941 | } | 962 | } |
942 | spin_unlock_irqrestore(&lp->lock, flags); | 963 | spin_unlock_irqrestore(&lp->lock, flags); |
943 | 964 | ||
944 | return 0; | 965 | return 0; |
945 | } | 966 | } |
946 | 967 | ||
947 | #define PCNET32_REGS_PER_PHY 32 | 968 | #define PCNET32_REGS_PER_PHY 32 |
948 | #define PCNET32_MAX_PHYS 32 | 969 | #define PCNET32_MAX_PHYS 32 |
949 | static int pcnet32_get_regs_len(struct net_device *dev) | 970 | static int pcnet32_get_regs_len(struct net_device *dev) |
950 | { | 971 | { |
951 | struct pcnet32_private *lp = dev->priv; | 972 | struct pcnet32_private *lp = dev->priv; |
952 | int j = lp->phycount * PCNET32_REGS_PER_PHY; | 973 | int j = lp->phycount * PCNET32_REGS_PER_PHY; |
953 | 974 | ||
954 | return((PCNET32_NUM_REGS + j) * sizeof(u16)); | 975 | return ((PCNET32_NUM_REGS + j) * sizeof(u16)); |
955 | } | 976 | } |
956 | 977 | ||
957 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 978 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
958 | void *ptr) | 979 | void *ptr) |
959 | { | 980 | { |
960 | int i, csr0; | 981 | int i, csr0; |
961 | u16 *buff = ptr; | 982 | u16 *buff = ptr; |
962 | struct pcnet32_private *lp = dev->priv; | 983 | struct pcnet32_private *lp = dev->priv; |
963 | struct pcnet32_access *a = &lp->a; | 984 | struct pcnet32_access *a = &lp->a; |
964 | ulong ioaddr = dev->base_addr; | 985 | ulong ioaddr = dev->base_addr; |
965 | int ticks; | 986 | int ticks; |
966 | unsigned long flags; | 987 | unsigned long flags; |
967 | 988 | ||
968 | spin_lock_irqsave(&lp->lock, flags); | 989 | spin_lock_irqsave(&lp->lock, flags); |
969 | 990 | ||
970 | csr0 = a->read_csr(ioaddr, 0); | 991 | csr0 = a->read_csr(ioaddr, 0); |
971 | if (!(csr0 & 0x0004)) { /* If not stopped */ | 992 | if (!(csr0 & 0x0004)) { /* If not stopped */ |
972 | /* set SUSPEND (SPND) - CSR5 bit 0 */ | 993 | /* set SUSPEND (SPND) - CSR5 bit 0 */ |
973 | a->write_csr(ioaddr, 5, 0x0001); | 994 | a->write_csr(ioaddr, 5, 0x0001); |
974 | 995 | ||
975 | /* poll waiting for bit to be set */ | 996 | /* poll waiting for bit to be set */ |
976 | ticks = 0; | 997 | ticks = 0; |
977 | while (!(a->read_csr(ioaddr, 5) & 0x0001)) { | 998 | while (!(a->read_csr(ioaddr, 5) & 0x0001)) { |
978 | spin_unlock_irqrestore(&lp->lock, flags); | 999 | spin_unlock_irqrestore(&lp->lock, flags); |
979 | mdelay(1); | 1000 | mdelay(1); |
980 | spin_lock_irqsave(&lp->lock, flags); | 1001 | spin_lock_irqsave(&lp->lock, flags); |
981 | ticks++; | 1002 | ticks++; |
982 | if (ticks > 200) { | 1003 | if (ticks > 200) { |
983 | if (netif_msg_hw(lp)) | 1004 | if (netif_msg_hw(lp)) |
984 | printk(KERN_DEBUG "%s: Error getting into suspend!\n", | 1005 | printk(KERN_DEBUG |
985 | dev->name); | 1006 | "%s: Error getting into suspend!\n", |
986 | break; | 1007 | dev->name); |
987 | } | 1008 | break; |
988 | } | 1009 | } |
989 | } | ||
990 | |||
991 | /* read address PROM */ | ||
992 | for (i=0; i<16; i += 2) | ||
993 | *buff++ = inw(ioaddr + i); | ||
994 | |||
995 | /* read control and status registers */ | ||
996 | for (i=0; i<90; i++) { | ||
997 | *buff++ = a->read_csr(ioaddr, i); | ||
998 | } | ||
999 | |||
1000 | *buff++ = a->read_csr(ioaddr, 112); | ||
1001 | *buff++ = a->read_csr(ioaddr, 114); | ||
1002 | |||
1003 | /* read bus configuration registers */ | ||
1004 | for (i=0; i<30; i++) { | ||
1005 | *buff++ = a->read_bcr(ioaddr, i); | ||
1006 | } | ||
1007 | *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ | ||
1008 | for (i=31; i<36; i++) { | ||
1009 | *buff++ = a->read_bcr(ioaddr, i); | ||
1010 | } | ||
1011 | |||
1012 | /* read mii phy registers */ | ||
1013 | if (lp->mii) { | ||
1014 | int j; | ||
1015 | for (j=0; j<PCNET32_MAX_PHYS; j++) { | ||
1016 | if (lp->phymask & (1 << j)) { | ||
1017 | for (i=0; i<PCNET32_REGS_PER_PHY; i++) { | ||
1018 | lp->a.write_bcr(ioaddr, 33, (j << 5) | i); | ||
1019 | *buff++ = lp->a.read_bcr(ioaddr, 34); | ||
1020 | } | 1010 | } |
1021 | } | ||
1022 | } | 1011 | } |
1023 | } | ||
1024 | 1012 | ||
1025 | if (!(csr0 & 0x0004)) { /* If not stopped */ | 1013 | /* read address PROM */ |
1026 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ | 1014 | for (i = 0; i < 16; i += 2) |
1027 | a->write_csr(ioaddr, 5, 0x0000); | 1015 | *buff++ = inw(ioaddr + i); |
1028 | } | 1016 | |
1017 | /* read control and status registers */ | ||
1018 | for (i = 0; i < 90; i++) { | ||
1019 | *buff++ = a->read_csr(ioaddr, i); | ||
1020 | } | ||
1021 | |||
1022 | *buff++ = a->read_csr(ioaddr, 112); | ||
1023 | *buff++ = a->read_csr(ioaddr, 114); | ||
1029 | 1024 | ||
1030 | spin_unlock_irqrestore(&lp->lock, flags); | 1025 | /* read bus configuration registers */ |
1026 | for (i = 0; i < 30; i++) { | ||
1027 | *buff++ = a->read_bcr(ioaddr, i); | ||
1028 | } | ||
1029 | *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ | ||
1030 | for (i = 31; i < 36; i++) { | ||
1031 | *buff++ = a->read_bcr(ioaddr, i); | ||
1032 | } | ||
1033 | |||
1034 | /* read mii phy registers */ | ||
1035 | if (lp->mii) { | ||
1036 | int j; | ||
1037 | for (j = 0; j < PCNET32_MAX_PHYS; j++) { | ||
1038 | if (lp->phymask & (1 << j)) { | ||
1039 | for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { | ||
1040 | lp->a.write_bcr(ioaddr, 33, | ||
1041 | (j << 5) | i); | ||
1042 | *buff++ = lp->a.read_bcr(ioaddr, 34); | ||
1043 | } | ||
1044 | } | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | if (!(csr0 & 0x0004)) { /* If not stopped */ | ||
1049 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ | ||
1050 | a->write_csr(ioaddr, 5, 0x0000); | ||
1051 | } | ||
1052 | |||
1053 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1031 | } | 1054 | } |
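
The register-dump path above (pcnet32_get_regs in the right-hand column) parks a running chip by setting the SPND bit in CSR5, then polls for the bit to latch with a 200-tick budget, dropping the spinlock around each 1 ms delay so the interrupt handler is not blocked. A minimal user-space sketch of that bounded-poll idiom follows; read_status() and SPND_BIT are illustrative stand-ins, not driver symbols.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define SPND_BIT 0x0001                 /* stand-in for CSR5 bit 0 (SPND) */

/* Toy "hardware": reports the suspend bit as set after a few polls. */
static unsigned int read_status(void)
{
	static int polls;
	return (++polls > 3) ? SPND_BIT : 0;
}

/* Poll for the bit with a bounded retry count, sleeping ~1 ms per try. */
static bool wait_for_suspend(unsigned int max_ticks)
{
	unsigned int ticks = 0;

	while (!(read_status() & SPND_BIT)) {
		usleep(1000);           /* the driver uses mdelay(1) here */
		if (++ticks > max_ticks) {
			fprintf(stderr, "error getting into suspend\n");
			return false;
		}
	}
	return true;
}

int main(void)
{
	printf("suspended: %s\n", wait_for_suspend(200) ? "yes" : "no");
	return 0;
}

The lock drop around the delay is the important detail in the real code: without it, a worst-case 200 ms spin while holding lp->lock would stall the interrupt path.
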
1032 | 1055 | ||
1033 | static struct ethtool_ops pcnet32_ethtool_ops = { | 1056 | static struct ethtool_ops pcnet32_ethtool_ops = { |
1034 | .get_settings = pcnet32_get_settings, | 1057 | .get_settings = pcnet32_get_settings, |
1035 | .set_settings = pcnet32_set_settings, | 1058 | .set_settings = pcnet32_set_settings, |
1036 | .get_drvinfo = pcnet32_get_drvinfo, | 1059 | .get_drvinfo = pcnet32_get_drvinfo, |
1037 | .get_msglevel = pcnet32_get_msglevel, | 1060 | .get_msglevel = pcnet32_get_msglevel, |
1038 | .set_msglevel = pcnet32_set_msglevel, | 1061 | .set_msglevel = pcnet32_set_msglevel, |
1039 | .nway_reset = pcnet32_nway_reset, | 1062 | .nway_reset = pcnet32_nway_reset, |
1040 | .get_link = pcnet32_get_link, | 1063 | .get_link = pcnet32_get_link, |
1041 | .get_ringparam = pcnet32_get_ringparam, | 1064 | .get_ringparam = pcnet32_get_ringparam, |
1042 | .set_ringparam = pcnet32_set_ringparam, | 1065 | .set_ringparam = pcnet32_set_ringparam, |
1043 | .get_tx_csum = ethtool_op_get_tx_csum, | 1066 | .get_tx_csum = ethtool_op_get_tx_csum, |
1044 | .get_sg = ethtool_op_get_sg, | 1067 | .get_sg = ethtool_op_get_sg, |
1045 | .get_tso = ethtool_op_get_tso, | 1068 | .get_tso = ethtool_op_get_tso, |
1046 | .get_strings = pcnet32_get_strings, | 1069 | .get_strings = pcnet32_get_strings, |
1047 | .self_test_count = pcnet32_self_test_count, | 1070 | .self_test_count = pcnet32_self_test_count, |
1048 | .self_test = pcnet32_ethtool_test, | 1071 | .self_test = pcnet32_ethtool_test, |
1049 | .phys_id = pcnet32_phys_id, | 1072 | .phys_id = pcnet32_phys_id, |
1050 | .get_regs_len = pcnet32_get_regs_len, | 1073 | .get_regs_len = pcnet32_get_regs_len, |
1051 | .get_regs = pcnet32_get_regs, | 1074 | .get_regs = pcnet32_get_regs, |
1052 | .get_perm_addr = ethtool_op_get_perm_addr, | 1075 | .get_perm_addr = ethtool_op_get_perm_addr, |
1053 | }; | 1076 | }; |
1054 | 1077 | ||
1055 | /* only probes for non-PCI devices, the rest are handled by | 1078 | /* only probes for non-PCI devices, the rest are handled by |
1056 | * pci_register_driver via pcnet32_probe_pci */ | 1079 | * pci_register_driver via pcnet32_probe_pci */ |
1057 | 1080 | ||
1058 | static void __devinit | 1081 | static void __devinit pcnet32_probe_vlbus(void) |
1059 | pcnet32_probe_vlbus(void) | ||
1060 | { | 1082 | { |
1061 | unsigned int *port, ioaddr; | 1083 | unsigned int *port, ioaddr; |
1062 | 1084 | ||
1063 | /* search for PCnet32 VLB cards at known addresses */ | 1085 | /* search for PCnet32 VLB cards at known addresses */ |
1064 | for (port = pcnet32_portlist; (ioaddr = *port); port++) { | 1086 | for (port = pcnet32_portlist; (ioaddr = *port); port++) { |
1065 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { | 1087 | if (request_region |
1066 | /* check if there is really a pcnet chip on that ioaddr */ | 1088 | (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { |
1067 | if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) { | 1089 | /* check if there is really a pcnet chip on that ioaddr */ |
1068 | pcnet32_probe1(ioaddr, 0, NULL); | 1090 | if ((inb(ioaddr + 14) == 0x57) |
1069 | } else { | 1091 | && (inb(ioaddr + 15) == 0x57)) { |
1070 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | 1092 | pcnet32_probe1(ioaddr, 0, NULL); |
1071 | } | 1093 | } else { |
1072 | } | 1094 | release_region(ioaddr, PCNET32_TOTAL_SIZE); |
1073 | } | 1095 | } |
1096 | } | ||
1097 | } | ||
1074 | } | 1098 | } |
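
pcnet32_probe_vlbus() above scans a fixed list of legacy I/O ports, claims each range with request_region(), keeps the range only if the two PROM signature bytes at offsets 14 and 15 both read 0x57 ('W'), and releases it otherwise. A self-contained sketch of that claim-check-release loop; claim_range(), release_range() and read_prom() are invented placeholders for the real request_region()/release_region()/inb() calls.

#include <stdbool.h>
#include <stdio.h>

static const unsigned int portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 };

/* Fake address PROM: only the card at 0x340 carries the 0x57 0x57 signature. */
static unsigned char read_prom(unsigned int ioaddr, unsigned int offset)
{
	if (ioaddr == 0x340 && (offset == 14 || offset == 15))
		return 0x57;
	return 0xff;
}

static bool claim_range(unsigned int ioaddr)
{
	printf("claim   %#x\n", ioaddr);
	return true;            /* pretend the region is always free */
}

static void release_range(unsigned int ioaddr)
{
	printf("release %#x\n", ioaddr);
}

int main(void)
{
	const unsigned int *port;
	unsigned int ioaddr;

	for (port = portlist; (ioaddr = *port); port++) {
		if (!claim_range(ioaddr))
			continue;                    /* already owned elsewhere */
		if (read_prom(ioaddr, 14) == 0x57 &&
		    read_prom(ioaddr, 15) == 0x57)
			printf("PCnet signature at %#x, keep the region\n", ioaddr);
		else
			release_range(ioaddr);       /* not our chip, give it back */
	}
	return 0;
}
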
1075 | 1099 | ||
1076 | |||
1077 | static int __devinit | 1100 | static int __devinit |
1078 | pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | 1101 | pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) |
1079 | { | 1102 | { |
1080 | unsigned long ioaddr; | 1103 | unsigned long ioaddr; |
1081 | int err; | 1104 | int err; |
1082 | 1105 | ||
1083 | err = pci_enable_device(pdev); | 1106 | err = pci_enable_device(pdev); |
1084 | if (err < 0) { | 1107 | if (err < 0) { |
1085 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1108 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1086 | printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); | 1109 | printk(KERN_ERR PFX |
1087 | return err; | 1110 | "failed to enable device -- err=%d\n", err); |
1088 | } | 1111 | return err; |
1089 | pci_set_master(pdev); | 1112 | } |
1113 | pci_set_master(pdev); | ||
1114 | |||
1115 | ioaddr = pci_resource_start(pdev, 0); | ||
1116 | if (!ioaddr) { | ||
1117 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1118 | printk(KERN_ERR PFX | ||
1119 | "card has no PCI IO resources, aborting\n"); | ||
1120 | return -ENODEV; | ||
1121 | } | ||
1090 | 1122 | ||
1091 | ioaddr = pci_resource_start (pdev, 0); | 1123 | if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { |
1092 | if (!ioaddr) { | 1124 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1093 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1125 | printk(KERN_ERR PFX |
1094 | printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n"); | 1126 | "architecture does not support 32bit PCI busmaster DMA\n"); |
1095 | return -ENODEV; | 1127 | return -ENODEV; |
1096 | } | 1128 | } |
1129 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == | ||
1130 | NULL) { | ||
1131 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1132 | printk(KERN_ERR PFX | ||
1133 | "io address range already allocated\n"); | ||
1134 | return -EBUSY; | ||
1135 | } | ||
1097 | 1136 | ||
1098 | if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { | 1137 | err = pcnet32_probe1(ioaddr, 1, pdev); |
1099 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1138 | if (err < 0) { |
1100 | printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n"); | 1139 | pci_disable_device(pdev); |
1101 | return -ENODEV; | 1140 | } |
1102 | } | 1141 | return err; |
1103 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) { | ||
1104 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1105 | printk(KERN_ERR PFX "io address range already allocated\n"); | ||
1106 | return -EBUSY; | ||
1107 | } | ||
1108 | |||
1109 | err = pcnet32_probe1(ioaddr, 1, pdev); | ||
1110 | if (err < 0) { | ||
1111 | pci_disable_device(pdev); | ||
1112 | } | ||
1113 | return err; | ||
1114 | } | 1142 | } |
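
The reindented pcnet32_probe_pci() is a straight ladder of checks: enable the device, enable bus mastering, validate BAR 0, verify 32-bit DMA support, claim the I/O region, then hand off to pcnet32_probe1(); only a failing hand-off calls pci_disable_device(), the earlier failures simply return. A small stand-alone model of that ladder, with invented step_*() helpers in place of the PCI calls:

#include <stdio.h>

static int step_enable(void)     { puts("enable device");             return 0; }
static void step_disable(void)   { puts("disable device"); }
static int step_check_bar(void)  { puts("check BAR 0");               return 0; }
static int step_check_dma(void)  { puts("check 32-bit DMA");          return 0; }
static int step_claim_io(void)   { puts("claim I/O range");           return 0; }
static int step_probe1(void)     { puts("probe1 (pretend it fails)"); return -1; }

static int probe(void)
{
	int err = step_enable();

	if (err)
		return err;          /* nothing acquired yet */

	/* Mirrors the shown code: these early failures return without
	 * undoing step_enable(). */
	if (step_check_bar() || step_check_dma() || step_claim_io())
		return -1;

	err = step_probe1();
	if (err)
		step_disable();      /* only this failure path disables the device */
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}

Whether the early-return paths should also disable the device is a separate question this patch does not touch; the sketch only mirrors the control flow as committed.
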
1115 | 1143 | ||
1116 | |||
1117 | /* pcnet32_probe1 | 1144 | /* pcnet32_probe1 |
1118 | * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. | 1145 | * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. |
1119 | * pdev will be NULL when called from pcnet32_probe_vlbus. | 1146 | * pdev will be NULL when called from pcnet32_probe_vlbus. |
@@ -1121,710 +1148,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1121 | static int __devinit | 1148 | static int __devinit |
1122 | pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | 1149 | pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) |
1123 | { | 1150 | { |
1124 | struct pcnet32_private *lp; | 1151 | struct pcnet32_private *lp; |
1125 | dma_addr_t lp_dma_addr; | 1152 | dma_addr_t lp_dma_addr; |
1126 | int i, media; | 1153 | int i, media; |
1127 | int fdx, mii, fset, dxsuflo; | 1154 | int fdx, mii, fset, dxsuflo; |
1128 | int chip_version; | 1155 | int chip_version; |
1129 | char *chipname; | 1156 | char *chipname; |
1130 | struct net_device *dev; | 1157 | struct net_device *dev; |
1131 | struct pcnet32_access *a = NULL; | 1158 | struct pcnet32_access *a = NULL; |
1132 | u8 promaddr[6]; | 1159 | u8 promaddr[6]; |
1133 | int ret = -ENODEV; | 1160 | int ret = -ENODEV; |
1134 | 1161 | ||
1135 | /* reset the chip */ | 1162 | /* reset the chip */ |
1136 | pcnet32_wio_reset(ioaddr); | 1163 | pcnet32_wio_reset(ioaddr); |
1137 | 1164 | ||
1138 | /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ | 1165 | /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ |
1139 | if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { | 1166 | if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { |
1140 | a = &pcnet32_wio; | 1167 | a = &pcnet32_wio; |
1141 | } else { | 1168 | } else { |
1142 | pcnet32_dwio_reset(ioaddr); | 1169 | pcnet32_dwio_reset(ioaddr); |
1143 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { | 1170 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 |
1144 | a = &pcnet32_dwio; | 1171 | && pcnet32_dwio_check(ioaddr)) { |
1145 | } else | 1172 | a = &pcnet32_dwio; |
1146 | goto err_release_region; | 1173 | } else |
1147 | } | 1174 | goto err_release_region; |
1148 | 1175 | } | |
1149 | chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16); | 1176 | |
1150 | if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) | 1177 | chip_version = |
1151 | printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version); | 1178 | a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); |
1152 | if ((chip_version & 0xfff) != 0x003) { | 1179 | if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) |
1153 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1180 | printk(KERN_INFO " PCnet chip version is %#x.\n", |
1154 | printk(KERN_INFO PFX "Unsupported chip version.\n"); | 1181 | chip_version); |
1155 | goto err_release_region; | 1182 | if ((chip_version & 0xfff) != 0x003) { |
1156 | } | 1183 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1157 | 1184 | printk(KERN_INFO PFX "Unsupported chip version.\n"); | |
1158 | /* initialize variables */ | 1185 | goto err_release_region; |
1159 | fdx = mii = fset = dxsuflo = 0; | 1186 | } |
1160 | chip_version = (chip_version >> 12) & 0xffff; | 1187 | |
1161 | 1188 | /* initialize variables */ | |
1162 | switch (chip_version) { | 1189 | fdx = mii = fset = dxsuflo = 0; |
1163 | case 0x2420: | 1190 | chip_version = (chip_version >> 12) & 0xffff; |
1164 | chipname = "PCnet/PCI 79C970"; /* PCI */ | 1191 | |
1165 | break; | 1192 | switch (chip_version) { |
1166 | case 0x2430: | 1193 | case 0x2420: |
1167 | if (shared) | 1194 | chipname = "PCnet/PCI 79C970"; /* PCI */ |
1168 | chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ | 1195 | break; |
1169 | else | 1196 | case 0x2430: |
1170 | chipname = "PCnet/32 79C965"; /* 486/VL bus */ | 1197 | if (shared) |
1171 | break; | 1198 | chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ |
1172 | case 0x2621: | 1199 | else |
1173 | chipname = "PCnet/PCI II 79C970A"; /* PCI */ | 1200 | chipname = "PCnet/32 79C965"; /* 486/VL bus */ |
1174 | fdx = 1; | 1201 | break; |
1175 | break; | 1202 | case 0x2621: |
1176 | case 0x2623: | 1203 | chipname = "PCnet/PCI II 79C970A"; /* PCI */ |
1177 | chipname = "PCnet/FAST 79C971"; /* PCI */ | 1204 | fdx = 1; |
1178 | fdx = 1; mii = 1; fset = 1; | 1205 | break; |
1179 | break; | 1206 | case 0x2623: |
1180 | case 0x2624: | 1207 | chipname = "PCnet/FAST 79C971"; /* PCI */ |
1181 | chipname = "PCnet/FAST+ 79C972"; /* PCI */ | 1208 | fdx = 1; |
1182 | fdx = 1; mii = 1; fset = 1; | 1209 | mii = 1; |
1183 | break; | 1210 | fset = 1; |
1184 | case 0x2625: | 1211 | break; |
1185 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | 1212 | case 0x2624: |
1186 | fdx = 1; mii = 1; | 1213 | chipname = "PCnet/FAST+ 79C972"; /* PCI */ |
1187 | break; | 1214 | fdx = 1; |
1188 | case 0x2626: | 1215 | mii = 1; |
1189 | chipname = "PCnet/Home 79C978"; /* PCI */ | 1216 | fset = 1; |
1190 | fdx = 1; | 1217 | break; |
1218 | case 0x2625: | ||
1219 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | ||
1220 | fdx = 1; | ||
1221 | mii = 1; | ||
1222 | break; | ||
1223 | case 0x2626: | ||
1224 | chipname = "PCnet/Home 79C978"; /* PCI */ | ||
1225 | fdx = 1; | ||
1226 | /* | ||
1227 | * This is based on specs published at www.amd.com. This section | ||
1228 | * assumes that a card with a 79C978 wants to go into standard | ||
1229 | * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, | ||
1230 | * and the module option homepna=1 can select this instead. | ||
1231 | */ | ||
1232 | media = a->read_bcr(ioaddr, 49); | ||
1233 | media &= ~3; /* default to 10Mb ethernet */ | ||
1234 | if (cards_found < MAX_UNITS && homepna[cards_found]) | ||
1235 | media |= 1; /* switch to home wiring mode */ | ||
1236 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1237 | printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", | ||
1238 | (media & 1) ? "1" : "10"); | ||
1239 | a->write_bcr(ioaddr, 49, media); | ||
1240 | break; | ||
1241 | case 0x2627: | ||
1242 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | ||
1243 | fdx = 1; | ||
1244 | mii = 1; | ||
1245 | break; | ||
1246 | case 0x2628: | ||
1247 | chipname = "PCnet/PRO 79C976"; | ||
1248 | fdx = 1; | ||
1249 | mii = 1; | ||
1250 | break; | ||
1251 | default: | ||
1252 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1253 | printk(KERN_INFO PFX | ||
1254 | "PCnet version %#x, no PCnet32 chip.\n", | ||
1255 | chip_version); | ||
1256 | goto err_release_region; | ||
1257 | } | ||
1258 | |||
1191 | /* | 1259 | /* |
1192 | * This is based on specs published at www.amd.com. This section | 1260 | * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit |
1193 | * assumes that a card with a 79C978 wants to go into standard | 1261 | * starting until the packet is loaded. Strike one for reliability, lose |
1194 | * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, | 1262 | * one for latency - although on PCI this isn't a big loss. Older chips |

1195 | * and the module option homepna=1 can select this instead. | 1263 | * have FIFO's smaller than a packet, so you can't do this. |
1264 | * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. | ||
1196 | */ | 1265 | */ |
1197 | media = a->read_bcr(ioaddr, 49); | 1266 | |
1198 | media &= ~3; /* default to 10Mb ethernet */ | 1267 | if (fset) { |
1199 | if (cards_found < MAX_UNITS && homepna[cards_found]) | 1268 | a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); |
1200 | media |= 1; /* switch to home wiring mode */ | 1269 | a->write_csr(ioaddr, 80, |
1201 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1270 | (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); |
1202 | printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", | 1271 | dxsuflo = 1; |
1203 | (media & 1) ? "1" : "10"); | 1272 | } |
1204 | a->write_bcr(ioaddr, 49, media); | 1273 | |
1205 | break; | 1274 | dev = alloc_etherdev(0); |
1206 | case 0x2627: | 1275 | if (!dev) { |
1207 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | 1276 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1208 | fdx = 1; mii = 1; | 1277 | printk(KERN_ERR PFX "Memory allocation failed.\n"); |
1209 | break; | 1278 | ret = -ENOMEM; |
1210 | case 0x2628: | 1279 | goto err_release_region; |
1211 | chipname = "PCnet/PRO 79C976"; | 1280 | } |
1212 | fdx = 1; mii = 1; | 1281 | SET_NETDEV_DEV(dev, &pdev->dev); |
1213 | break; | 1282 | |
1214 | default: | ||
1215 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1216 | printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n", | ||
1217 | chip_version); | ||
1218 | goto err_release_region; | ||
1219 | } | ||
1220 | |||
1221 | /* | ||
1222 | * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit | ||
1223 | * starting until the packet is loaded. Strike one for reliability, lose | ||
1224 | * one for latency - although on PCI this isn't a big loss. Older chips | ||
1225 | * have FIFO's smaller than a packet, so you can't do this. | ||
1226 | * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. | ||
1227 | */ | ||
1228 | |||
1229 | if (fset) { | ||
1230 | a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); | ||
1231 | a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); | ||
1232 | dxsuflo = 1; | ||
1233 | } | ||
1234 | |||
1235 | dev = alloc_etherdev(0); | ||
1236 | if (!dev) { | ||
1237 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1283 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1238 | printk(KERN_ERR PFX "Memory allocation failed.\n"); | 1284 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); |
1239 | ret = -ENOMEM; | 1285 | |
1240 | goto err_release_region; | 1286 | /* In most chips, after a chip reset, the ethernet address is read from the |
1241 | } | 1287 | * station address PROM at the base address and programmed into the |
1242 | SET_NETDEV_DEV(dev, &pdev->dev); | 1288 | * "Physical Address Registers" CSR12-14. |
1243 | 1289 | * As a precautionary measure, we read the PROM values and complain if | |
1244 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1290 | * they disagree with the CSRs. Either way, we use the CSR values, and |
1245 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); | 1291 | * double check that they are valid. |
1246 | 1292 | */ | |
1247 | /* In most chips, after a chip reset, the ethernet address is read from the | 1293 | for (i = 0; i < 3; i++) { |
1248 | * station address PROM at the base address and programmed into the | 1294 | unsigned int val; |
1249 | * "Physical Address Registers" CSR12-14. | 1295 | val = a->read_csr(ioaddr, i + 12) & 0x0ffff; |
1250 | * As a precautionary measure, we read the PROM values and complain if | 1296 | /* There may be endianness issues here. */ |
1251 | * they disagree with the CSRs. Either way, we use the CSR values, and | 1297 | dev->dev_addr[2 * i] = val & 0x0ff; |
1252 | * double check that they are valid. | 1298 | dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; |
1253 | */ | 1299 | } |
1254 | for (i = 0; i < 3; i++) { | 1300 | |
1255 | unsigned int val; | 1301 | /* read PROM address and compare with CSR address */ |
1256 | val = a->read_csr(ioaddr, i+12) & 0x0ffff; | ||
1257 | /* There may be endianness issues here. */ | ||
1258 | dev->dev_addr[2*i] = val & 0x0ff; | ||
1259 | dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff; | ||
1260 | } | ||
1261 | |||
1262 | /* read PROM address and compare with CSR address */ | ||
1263 | for (i = 0; i < 6; i++) | ||
1264 | promaddr[i] = inb(ioaddr + i); | ||
1265 | |||
1266 | if (memcmp(promaddr, dev->dev_addr, 6) | ||
1267 | || !is_valid_ether_addr(dev->dev_addr)) { | ||
1268 | if (is_valid_ether_addr(promaddr)) { | ||
1269 | if (pcnet32_debug & NETIF_MSG_PROBE) { | ||
1270 | printk(" warning: CSR address invalid,\n"); | ||
1271 | printk(KERN_INFO " using instead PROM address of"); | ||
1272 | } | ||
1273 | memcpy(dev->dev_addr, promaddr, 6); | ||
1274 | } | ||
1275 | } | ||
1276 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | ||
1277 | |||
1278 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ | ||
1279 | if (!is_valid_ether_addr(dev->perm_addr)) | ||
1280 | memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); | ||
1281 | |||
1282 | if (pcnet32_debug & NETIF_MSG_PROBE) { | ||
1283 | for (i = 0; i < 6; i++) | 1302 | for (i = 0; i < 6; i++) |
1284 | printk(" %2.2x", dev->dev_addr[i]); | 1303 | promaddr[i] = inb(ioaddr + i); |
1285 | 1304 | ||
1286 | /* Version 0x2623 and 0x2624 */ | 1305 | if (memcmp(promaddr, dev->dev_addr, 6) |
1287 | if (((chip_version + 1) & 0xfffe) == 0x2624) { | 1306 | || !is_valid_ether_addr(dev->dev_addr)) { |
1288 | i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ | 1307 | if (is_valid_ether_addr(promaddr)) { |
1289 | printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i); | 1308 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
1290 | switch(i>>10) { | 1309 | printk(" warning: CSR address invalid,\n"); |
1291 | case 0: printk(" 20 bytes,"); break; | 1310 | printk(KERN_INFO |
1292 | case 1: printk(" 64 bytes,"); break; | 1311 | " using instead PROM address of"); |
1293 | case 2: printk(" 128 bytes,"); break; | 1312 | } |
1294 | case 3: printk("~220 bytes,"); break; | 1313 | memcpy(dev->dev_addr, promaddr, 6); |
1295 | } | 1314 | } |
1296 | i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ | 1315 | } |
1297 | printk(" BCR18(%x):",i&0xffff); | 1316 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
1298 | if (i & (1<<5)) printk("BurstWrEn "); | 1317 | |
1299 | if (i & (1<<6)) printk("BurstRdEn "); | 1318 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ |
1300 | if (i & (1<<7)) printk("DWordIO "); | 1319 | if (!is_valid_ether_addr(dev->perm_addr)) |
1301 | if (i & (1<<11)) printk("NoUFlow "); | 1320 | memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); |
1302 | i = a->read_bcr(ioaddr, 25); | 1321 | |
1303 | printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8); | 1322 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
1304 | i = a->read_bcr(ioaddr, 26); | 1323 | for (i = 0; i < 6; i++) |
1305 | printk(" SRAM_BND=0x%04x,",i<<8); | 1324 | printk(" %2.2x", dev->dev_addr[i]); |
1306 | i = a->read_bcr(ioaddr, 27); | 1325 | |
1307 | if (i & (1<<14)) printk("LowLatRx"); | 1326 | /* Version 0x2623 and 0x2624 */ |
1308 | } | 1327 | if (((chip_version + 1) & 0xfffe) == 0x2624) { |
1309 | } | 1328 | i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ |
1310 | 1329 | printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); | |
1311 | dev->base_addr = ioaddr; | 1330 | switch (i >> 10) { |
1312 | /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ | 1331 | case 0: |
1313 | if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { | 1332 | printk(" 20 bytes,"); |
1314 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1333 | break; |
1315 | printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); | 1334 | case 1: |
1316 | ret = -ENOMEM; | 1335 | printk(" 64 bytes,"); |
1317 | goto err_free_netdev; | 1336 | break; |
1318 | } | 1337 | case 2: |
1319 | 1338 | printk(" 128 bytes,"); | |
1320 | memset(lp, 0, sizeof(*lp)); | 1339 | break; |
1321 | lp->dma_addr = lp_dma_addr; | 1340 | case 3: |
1322 | lp->pci_dev = pdev; | 1341 | printk("~220 bytes,"); |
1323 | 1342 | break; | |
1324 | spin_lock_init(&lp->lock); | 1343 | } |
1325 | 1344 | i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ | |
1326 | SET_MODULE_OWNER(dev); | 1345 | printk(" BCR18(%x):", i & 0xffff); |
1327 | SET_NETDEV_DEV(dev, &pdev->dev); | 1346 | if (i & (1 << 5)) |
1328 | dev->priv = lp; | 1347 | printk("BurstWrEn "); |
1329 | lp->name = chipname; | 1348 | if (i & (1 << 6)) |
1330 | lp->shared_irq = shared; | 1349 | printk("BurstRdEn "); |
1331 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ | 1350 | if (i & (1 << 7)) |
1332 | lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ | 1351 | printk("DWordIO "); |
1333 | lp->tx_mod_mask = lp->tx_ring_size - 1; | 1352 | if (i & (1 << 11)) |
1334 | lp->rx_mod_mask = lp->rx_ring_size - 1; | 1353 | printk("NoUFlow "); |
1335 | lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); | 1354 | i = a->read_bcr(ioaddr, 25); |
1336 | lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); | 1355 | printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); |
1337 | lp->mii_if.full_duplex = fdx; | 1356 | i = a->read_bcr(ioaddr, 26); |
1338 | lp->mii_if.phy_id_mask = 0x1f; | 1357 | printk(" SRAM_BND=0x%04x,", i << 8); |
1339 | lp->mii_if.reg_num_mask = 0x1f; | 1358 | i = a->read_bcr(ioaddr, 27); |
1340 | lp->dxsuflo = dxsuflo; | 1359 | if (i & (1 << 14)) |
1341 | lp->mii = mii; | 1360 | printk("LowLatRx"); |
1342 | lp->msg_enable = pcnet32_debug; | 1361 | } |
1343 | if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) | 1362 | } |
1344 | lp->options = PCNET32_PORT_ASEL; | 1363 | |
1345 | else | 1364 | dev->base_addr = ioaddr; |
1346 | lp->options = options_mapping[options[cards_found]]; | 1365 | /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ |
1347 | lp->mii_if.dev = dev; | 1366 | if ((lp = |
1348 | lp->mii_if.mdio_read = mdio_read; | 1367 | pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { |
1349 | lp->mii_if.mdio_write = mdio_write; | 1368 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1350 | 1369 | printk(KERN_ERR PFX | |
1351 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && | 1370 | "Consistent memory allocation failed.\n"); |
1352 | ((cards_found>=MAX_UNITS) || full_duplex[cards_found])) | 1371 | ret = -ENOMEM; |
1353 | lp->options |= PCNET32_PORT_FD; | 1372 | goto err_free_netdev; |
1354 | 1373 | } | |
1355 | if (!a) { | 1374 | |
1356 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1375 | memset(lp, 0, sizeof(*lp)); |
1357 | printk(KERN_ERR PFX "No access methods\n"); | 1376 | lp->dma_addr = lp_dma_addr; |
1358 | ret = -ENODEV; | 1377 | lp->pci_dev = pdev; |
1359 | goto err_free_consistent; | 1378 | |
1360 | } | 1379 | spin_lock_init(&lp->lock); |
1361 | lp->a = *a; | 1380 | |
1362 | 1381 | SET_MODULE_OWNER(dev); | |
1363 | /* prior to register_netdev, dev->name is not yet correct */ | 1382 | SET_NETDEV_DEV(dev, &pdev->dev); |
1364 | if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { | 1383 | dev->priv = lp; |
1365 | ret = -ENOMEM; | 1384 | lp->name = chipname; |
1366 | goto err_free_ring; | 1385 | lp->shared_irq = shared; |
1367 | } | 1386 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ |
1368 | /* detect special T1/E1 WAN card by checking for MAC address */ | 1387 | lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ |
1369 | if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 | 1388 | lp->tx_mod_mask = lp->tx_ring_size - 1; |
1389 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
1390 | lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); | ||
1391 | lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); | ||
1392 | lp->mii_if.full_duplex = fdx; | ||
1393 | lp->mii_if.phy_id_mask = 0x1f; | ||
1394 | lp->mii_if.reg_num_mask = 0x1f; | ||
1395 | lp->dxsuflo = dxsuflo; | ||
1396 | lp->mii = mii; | ||
1397 | lp->msg_enable = pcnet32_debug; | ||
1398 | if ((cards_found >= MAX_UNITS) | ||
1399 | || (options[cards_found] > sizeof(options_mapping))) | ||
1400 | lp->options = PCNET32_PORT_ASEL; | ||
1401 | else | ||
1402 | lp->options = options_mapping[options[cards_found]]; | ||
1403 | lp->mii_if.dev = dev; | ||
1404 | lp->mii_if.mdio_read = mdio_read; | ||
1405 | lp->mii_if.mdio_write = mdio_write; | ||
1406 | |||
1407 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && | ||
1408 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) | ||
1409 | lp->options |= PCNET32_PORT_FD; | ||
1410 | |||
1411 | if (!a) { | ||
1412 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1413 | printk(KERN_ERR PFX "No access methods\n"); | ||
1414 | ret = -ENODEV; | ||
1415 | goto err_free_consistent; | ||
1416 | } | ||
1417 | lp->a = *a; | ||
1418 | |||
1419 | /* prior to register_netdev, dev->name is not yet correct */ | ||
1420 | if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { | ||
1421 | ret = -ENOMEM; | ||
1422 | goto err_free_ring; | ||
1423 | } | ||
1424 | /* detect special T1/E1 WAN card by checking for MAC address */ | ||
1425 | if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 | ||
1370 | && dev->dev_addr[2] == 0x75) | 1426 | && dev->dev_addr[2] == 0x75) |
1371 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; | 1427 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; |
1372 | |||
1373 | lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ | ||
1374 | lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); | ||
1375 | for (i = 0; i < 6; i++) | ||
1376 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | ||
1377 | lp->init_block.filter[0] = 0x00000000; | ||
1378 | lp->init_block.filter[1] = 0x00000000; | ||
1379 | lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); | ||
1380 | lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); | ||
1381 | |||
1382 | /* switch pcnet32 to 32bit mode */ | ||
1383 | a->write_bcr(ioaddr, 20, 2); | ||
1384 | |||
1385 | a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1386 | init_block)) & 0xffff); | ||
1387 | a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1388 | init_block)) >> 16); | ||
1389 | |||
1390 | if (pdev) { /* use the IRQ provided by PCI */ | ||
1391 | dev->irq = pdev->irq; | ||
1392 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1393 | printk(" assigned IRQ %d.\n", dev->irq); | ||
1394 | } else { | ||
1395 | unsigned long irq_mask = probe_irq_on(); | ||
1396 | 1428 | ||
1397 | /* | 1429 | lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ |
1398 | * To auto-IRQ we enable the initialization-done and DMA error | 1430 | lp->init_block.tlen_rlen = |
1399 | * interrupts. For ISA boards we get a DMA error, but VLB and PCI | 1431 | le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); |
1400 | * boards will work. | 1432 | for (i = 0; i < 6; i++) |
1401 | */ | 1433 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; |
1402 | /* Trigger an initialization just for the interrupt. */ | 1434 | lp->init_block.filter[0] = 0x00000000; |
1403 | a->write_csr (ioaddr, 0, 0x41); | 1435 | lp->init_block.filter[1] = 0x00000000; |
1404 | mdelay (1); | 1436 | lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); |
1437 | lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); | ||
1438 | |||
1439 | /* switch pcnet32 to 32bit mode */ | ||
1440 | a->write_bcr(ioaddr, 20, 2); | ||
1441 | |||
1442 | a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1443 | init_block)) & 0xffff); | ||
1444 | a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1445 | init_block)) >> 16); | ||
1446 | |||
1447 | if (pdev) { /* use the IRQ provided by PCI */ | ||
1448 | dev->irq = pdev->irq; | ||
1449 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1450 | printk(" assigned IRQ %d.\n", dev->irq); | ||
1451 | } else { | ||
1452 | unsigned long irq_mask = probe_irq_on(); | ||
1453 | |||
1454 | /* | ||
1455 | * To auto-IRQ we enable the initialization-done and DMA error | ||
1456 | * interrupts. For ISA boards we get a DMA error, but VLB and PCI | ||
1457 | * boards will work. | ||
1458 | */ | ||
1459 | /* Trigger an initialization just for the interrupt. */ | ||
1460 | a->write_csr(ioaddr, 0, 0x41); | ||
1461 | mdelay(1); | ||
1462 | |||
1463 | dev->irq = probe_irq_off(irq_mask); | ||
1464 | if (!dev->irq) { | ||
1465 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1466 | printk(", failed to detect IRQ line.\n"); | ||
1467 | ret = -ENODEV; | ||
1468 | goto err_free_ring; | ||
1469 | } | ||
1470 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1471 | printk(", probed IRQ %d.\n", dev->irq); | ||
1472 | } | ||
1405 | 1473 | ||
1406 | dev->irq = probe_irq_off (irq_mask); | 1474 | /* Set the mii phy_id so that we can query the link state */ |
1407 | if (!dev->irq) { | 1475 | if (lp->mii) { |
1408 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1476 | /* lp->phycount and lp->phymask are set to 0 by memset above */ |
1409 | printk(", failed to detect IRQ line.\n"); | 1477 | |
1410 | ret = -ENODEV; | 1478 | lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; |
1411 | goto err_free_ring; | 1479 | /* scan for PHYs */ |
1480 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { | ||
1481 | unsigned short id1, id2; | ||
1482 | |||
1483 | id1 = mdio_read(dev, i, MII_PHYSID1); | ||
1484 | if (id1 == 0xffff) | ||
1485 | continue; | ||
1486 | id2 = mdio_read(dev, i, MII_PHYSID2); | ||
1487 | if (id2 == 0xffff) | ||
1488 | continue; | ||
1489 | if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) | ||
1490 | continue; /* 79C971 & 79C972 have phantom phy at id 31 */ | ||
1491 | lp->phycount++; | ||
1492 | lp->phymask |= (1 << i); | ||
1493 | lp->mii_if.phy_id = i; | ||
1494 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1495 | printk(KERN_INFO PFX | ||
1496 | "Found PHY %04x:%04x at address %d.\n", | ||
1497 | id1, id2, i); | ||
1498 | } | ||
1499 | lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); | ||
1500 | if (lp->phycount > 1) { | ||
1501 | lp->options |= PCNET32_PORT_MII; | ||
1502 | } | ||
1412 | } | 1503 | } |
1413 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1504 | |
1414 | printk(", probed IRQ %d.\n", dev->irq); | 1505 | init_timer(&lp->watchdog_timer); |
1415 | } | 1506 | lp->watchdog_timer.data = (unsigned long)dev; |
1416 | 1507 | lp->watchdog_timer.function = (void *)&pcnet32_watchdog; | |
1417 | /* Set the mii phy_id so that we can query the link state */ | 1508 | |
1418 | if (lp->mii) { | 1509 | /* The PCNET32-specific entries in the device structure. */ |
1419 | /* lp->phycount and lp->phymask are set to 0 by memset above */ | 1510 | dev->open = &pcnet32_open; |
1420 | 1511 | dev->hard_start_xmit = &pcnet32_start_xmit; | |
1421 | lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f; | 1512 | dev->stop = &pcnet32_close; |
1422 | /* scan for PHYs */ | 1513 | dev->get_stats = &pcnet32_get_stats; |
1423 | for (i=0; i<PCNET32_MAX_PHYS; i++) { | 1514 | dev->set_multicast_list = &pcnet32_set_multicast_list; |
1424 | unsigned short id1, id2; | 1515 | dev->do_ioctl = &pcnet32_ioctl; |
1425 | 1516 | dev->ethtool_ops = &pcnet32_ethtool_ops; | |
1426 | id1 = mdio_read(dev, i, MII_PHYSID1); | 1517 | dev->tx_timeout = pcnet32_tx_timeout; |
1427 | if (id1 == 0xffff) | 1518 | dev->watchdog_timeo = (5 * HZ); |
1428 | continue; | ||
1429 | id2 = mdio_read(dev, i, MII_PHYSID2); | ||
1430 | if (id2 == 0xffff) | ||
1431 | continue; | ||
1432 | if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) | ||
1433 | continue; /* 79C971 & 79C972 have phantom phy at id 31 */ | ||
1434 | lp->phycount++; | ||
1435 | lp->phymask |= (1 << i); | ||
1436 | lp->mii_if.phy_id = i; | ||
1437 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1438 | printk(KERN_INFO PFX "Found PHY %04x:%04x at address %d.\n", | ||
1439 | id1, id2, i); | ||
1440 | } | ||
1441 | lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); | ||
1442 | if (lp->phycount > 1) { | ||
1443 | lp->options |= PCNET32_PORT_MII; | ||
1444 | } | ||
1445 | } | ||
1446 | |||
1447 | init_timer (&lp->watchdog_timer); | ||
1448 | lp->watchdog_timer.data = (unsigned long) dev; | ||
1449 | lp->watchdog_timer.function = (void *) &pcnet32_watchdog; | ||
1450 | |||
1451 | /* The PCNET32-specific entries in the device structure. */ | ||
1452 | dev->open = &pcnet32_open; | ||
1453 | dev->hard_start_xmit = &pcnet32_start_xmit; | ||
1454 | dev->stop = &pcnet32_close; | ||
1455 | dev->get_stats = &pcnet32_get_stats; | ||
1456 | dev->set_multicast_list = &pcnet32_set_multicast_list; | ||
1457 | dev->do_ioctl = &pcnet32_ioctl; | ||
1458 | dev->ethtool_ops = &pcnet32_ethtool_ops; | ||
1459 | dev->tx_timeout = pcnet32_tx_timeout; | ||
1460 | dev->watchdog_timeo = (5*HZ); | ||
1461 | 1519 | ||
1462 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1520 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1463 | dev->poll_controller = pcnet32_poll_controller; | 1521 | dev->poll_controller = pcnet32_poll_controller; |
1464 | #endif | 1522 | #endif |
1465 | 1523 | ||
1466 | /* Fill in the generic fields of the device structure. */ | 1524 | /* Fill in the generic fields of the device structure. */ |
1467 | if (register_netdev(dev)) | 1525 | if (register_netdev(dev)) |
1468 | goto err_free_ring; | 1526 | goto err_free_ring; |
1469 | 1527 | ||
1470 | if (pdev) { | 1528 | if (pdev) { |
1471 | pci_set_drvdata(pdev, dev); | 1529 | pci_set_drvdata(pdev, dev); |
1472 | } else { | 1530 | } else { |
1473 | lp->next = pcnet32_dev; | 1531 | lp->next = pcnet32_dev; |
1474 | pcnet32_dev = dev; | 1532 | pcnet32_dev = dev; |
1475 | } | 1533 | } |
1476 | 1534 | ||
1477 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1535 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1478 | printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); | 1536 | printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); |
1479 | cards_found++; | 1537 | cards_found++; |
1480 | 1538 | ||
1481 | /* enable LED writes */ | 1539 | /* enable LED writes */ |
1482 | a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); | 1540 | a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); |
1483 | |||
1484 | return 0; | ||
1485 | |||
1486 | err_free_ring: | ||
1487 | pcnet32_free_ring(dev); | ||
1488 | err_free_consistent: | ||
1489 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | ||
1490 | err_free_netdev: | ||
1491 | free_netdev(dev); | ||
1492 | err_release_region: | ||
1493 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | ||
1494 | return ret; | ||
1495 | } | ||
1496 | 1541 | ||
1542 | return 0; | ||
1543 | |||
1544 | err_free_ring: | ||
1545 | pcnet32_free_ring(dev); | ||
1546 | err_free_consistent: | ||
1547 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | ||
1548 | err_free_netdev: | ||
1549 | free_netdev(dev); | ||
1550 | err_release_region: | ||
1551 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | ||
1552 | return ret; | ||
1553 | } | ||
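
pcnet32_probe1() unwinds with the usual goto ladder: err_free_ring, err_free_consistent, err_free_netdev and err_release_region each release everything acquired before the point of failure, in reverse order, while the success path returns before reaching the labels. The shape is easier to see in a tiny stand-alone model; the three malloc()ed stages below are placeholders for the netdev, the consistent block and the descriptor rings, not driver calls.

#include <stdio.h>
#include <stdlib.h>

/* fail_stage picks which allocation we pretend failed (0 = none). */
static int probe_demo(int fail_stage)
{
	char *netdev = NULL, *lp = NULL, *rings = NULL;

	netdev = (fail_stage == 1) ? NULL : malloc(64);
	if (!netdev)
		goto err_out;

	lp = (fail_stage == 2) ? NULL : malloc(64);
	if (!lp)
		goto err_free_netdev;

	rings = (fail_stage == 3) ? NULL : malloc(64);
	if (!rings)
		goto err_free_consistent;

	puts("probe succeeded");
	free(rings);            /* demo only: a real probe keeps its resources */
	free(lp);
	free(netdev);
	return 0;

err_free_consistent:
	puts("unwind: free consistent block");
	free(lp);
err_free_netdev:
	puts("unwind: free netdev");
	free(netdev);
err_out:
	return -1;
}

int main(void)
{
	probe_demo(3);          /* exercise the deepest unwind path */
	return probe_demo(0);   /* then the success path */
}

Each label frees exactly one more resource than the label below it, so a failure at any stage jumps to the label that matches what has been acquired so far.
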
1497 | 1554 | ||
1498 | /* if any allocation fails, caller must also call pcnet32_free_ring */ | 1555 | /* if any allocation fails, caller must also call pcnet32_free_ring */ |
1499 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) | 1556 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) |
1500 | { | 1557 | { |
1501 | struct pcnet32_private *lp = dev->priv; | 1558 | struct pcnet32_private *lp = dev->priv; |
1502 | 1559 | ||
1503 | lp->tx_ring = pci_alloc_consistent(lp->pci_dev, | 1560 | lp->tx_ring = pci_alloc_consistent(lp->pci_dev, |
1504 | sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, | 1561 | sizeof(struct pcnet32_tx_head) * |
1505 | &lp->tx_ring_dma_addr); | 1562 | lp->tx_ring_size, |
1506 | if (lp->tx_ring == NULL) { | 1563 | &lp->tx_ring_dma_addr); |
1507 | if (pcnet32_debug & NETIF_MSG_DRV) | 1564 | if (lp->tx_ring == NULL) { |
1508 | printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", | 1565 | if (pcnet32_debug & NETIF_MSG_DRV) |
1509 | name); | 1566 | printk("\n" KERN_ERR PFX |
1510 | return -ENOMEM; | 1567 | "%s: Consistent memory allocation failed.\n", |
1511 | } | 1568 | name); |
1512 | 1569 | return -ENOMEM; | |
1513 | lp->rx_ring = pci_alloc_consistent(lp->pci_dev, | 1570 | } |
1514 | sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, | ||
1515 | &lp->rx_ring_dma_addr); | ||
1516 | if (lp->rx_ring == NULL) { | ||
1517 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1518 | printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", | ||
1519 | name); | ||
1520 | return -ENOMEM; | ||
1521 | } | ||
1522 | |||
1523 | lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, | ||
1524 | GFP_ATOMIC); | ||
1525 | if (!lp->tx_dma_addr) { | ||
1526 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1527 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1528 | return -ENOMEM; | ||
1529 | } | ||
1530 | memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); | ||
1531 | |||
1532 | lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, | ||
1533 | GFP_ATOMIC); | ||
1534 | if (!lp->rx_dma_addr) { | ||
1535 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1536 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1537 | return -ENOMEM; | ||
1538 | } | ||
1539 | memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); | ||
1540 | |||
1541 | lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, | ||
1542 | GFP_ATOMIC); | ||
1543 | if (!lp->tx_skbuff) { | ||
1544 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1545 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1546 | return -ENOMEM; | ||
1547 | } | ||
1548 | memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); | ||
1549 | |||
1550 | lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, | ||
1551 | GFP_ATOMIC); | ||
1552 | if (!lp->rx_skbuff) { | ||
1553 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1554 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1555 | return -ENOMEM; | ||
1556 | } | ||
1557 | memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); | ||
1558 | 1571 | ||
1559 | return 0; | 1572 | lp->rx_ring = pci_alloc_consistent(lp->pci_dev, |
1560 | } | 1573 | sizeof(struct pcnet32_rx_head) * |
1574 | lp->rx_ring_size, | ||
1575 | &lp->rx_ring_dma_addr); | ||
1576 | if (lp->rx_ring == NULL) { | ||
1577 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1578 | printk("\n" KERN_ERR PFX | ||
1579 | "%s: Consistent memory allocation failed.\n", | ||
1580 | name); | ||
1581 | return -ENOMEM; | ||
1582 | } | ||
1561 | 1583 | ||
1584 | lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, | ||
1585 | GFP_ATOMIC); | ||
1586 | if (!lp->tx_dma_addr) { | ||
1587 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1588 | printk("\n" KERN_ERR PFX | ||
1589 | "%s: Memory allocation failed.\n", name); | ||
1590 | return -ENOMEM; | ||
1591 | } | ||
1592 | memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); | ||
1593 | |||
1594 | lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, | ||
1595 | GFP_ATOMIC); | ||
1596 | if (!lp->rx_dma_addr) { | ||
1597 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1598 | printk("\n" KERN_ERR PFX | ||
1599 | "%s: Memory allocation failed.\n", name); | ||
1600 | return -ENOMEM; | ||
1601 | } | ||
1602 | memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); | ||
1603 | |||
1604 | lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, | ||
1605 | GFP_ATOMIC); | ||
1606 | if (!lp->tx_skbuff) { | ||
1607 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1608 | printk("\n" KERN_ERR PFX | ||
1609 | "%s: Memory allocation failed.\n", name); | ||
1610 | return -ENOMEM; | ||
1611 | } | ||
1612 | memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); | ||
1613 | |||
1614 | lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, | ||
1615 | GFP_ATOMIC); | ||
1616 | if (!lp->rx_skbuff) { | ||
1617 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1618 | printk("\n" KERN_ERR PFX | ||
1619 | "%s: Memory allocation failed.\n", name); | ||
1620 | return -ENOMEM; | ||
1621 | } | ||
1622 | memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); | ||
1623 | |||
1624 | return 0; | ||
1625 | } | ||
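
pcnet32_alloc_ring() sets up, for each direction, a DMA-visible descriptor ring plus two parallel host-side arrays of the same length: the per-slot dma_addr_t handles and the per-slot sk_buff pointers, each kmalloc()ed and then zeroed with memset(). The comment above the function states the contract: on any failure the caller still runs pcnet32_free_ring(), so every pointer must be either valid or NULL. A user-space model of that parallel-array layout and NULL-safe teardown; the demo_* names are invented.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 16

struct demo_desc {                      /* stand-in for a pcnet32 descriptor */
	unsigned int base;
	short length, status;
};

struct demo_ring {
	struct demo_desc *desc;         /* would live in DMA-coherent memory */
	unsigned long *dma_addr;        /* per-slot bus address of the buffer */
	void **buf;                     /* per-slot host buffer (sk_buff in the driver) */
};

/* Allocate the ring and its parallel bookkeeping arrays, zero-filled. */
static int ring_alloc(struct demo_ring *r, unsigned int size)
{
	r->desc = calloc(size, sizeof(*r->desc));
	r->dma_addr = calloc(size, sizeof(*r->dma_addr));
	r->buf = calloc(size, sizeof(*r->buf));
	if (!r->desc || !r->dma_addr || !r->buf)
		return -1;              /* caller must still call ring_free() */
	return 0;
}

/* Safe after a partial allocation: free(NULL) is a no-op. */
static void ring_free(struct demo_ring *r)
{
	free(r->buf);      r->buf = NULL;
	free(r->dma_addr); r->dma_addr = NULL;
	free(r->desc);     r->desc = NULL;
}

int main(void)
{
	struct demo_ring ring = { NULL, NULL, NULL };

	if (ring_alloc(&ring, RING_SIZE) == 0)
		printf("ring of %d slots allocated\n", RING_SIZE);
	ring_free(&ring);
	return 0;
}

kfree(NULL) in the kernel behaves the same way, which is what lets pcnet32_free_ring() run unconditionally in the error paths above.
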
1562 | 1626 | ||
1563 | static void pcnet32_free_ring(struct net_device *dev) | 1627 | static void pcnet32_free_ring(struct net_device *dev) |
1564 | { | 1628 | { |
1565 | struct pcnet32_private *lp = dev->priv; | 1629 | struct pcnet32_private *lp = dev->priv; |
1566 | 1630 | ||
1567 | kfree(lp->tx_skbuff); | 1631 | kfree(lp->tx_skbuff); |
1568 | lp->tx_skbuff = NULL; | 1632 | lp->tx_skbuff = NULL; |
1569 | 1633 | ||
1570 | kfree(lp->rx_skbuff); | 1634 | kfree(lp->rx_skbuff); |
1571 | lp->rx_skbuff = NULL; | 1635 | lp->rx_skbuff = NULL; |
1572 | 1636 | ||
1573 | kfree(lp->tx_dma_addr); | 1637 | kfree(lp->tx_dma_addr); |
1574 | lp->tx_dma_addr = NULL; | 1638 | lp->tx_dma_addr = NULL; |
1575 | 1639 | ||
1576 | kfree(lp->rx_dma_addr); | 1640 | kfree(lp->rx_dma_addr); |
1577 | lp->rx_dma_addr = NULL; | 1641 | lp->rx_dma_addr = NULL; |
1578 | 1642 | ||
1579 | if (lp->tx_ring) { | 1643 | if (lp->tx_ring) { |
1580 | pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, | 1644 | pci_free_consistent(lp->pci_dev, |
1581 | lp->tx_ring, lp->tx_ring_dma_addr); | 1645 | sizeof(struct pcnet32_tx_head) * |
1582 | lp->tx_ring = NULL; | 1646 | lp->tx_ring_size, lp->tx_ring, |
1583 | } | 1647 | lp->tx_ring_dma_addr); |
1648 | lp->tx_ring = NULL; | ||
1649 | } | ||
1584 | 1650 | ||
1585 | if (lp->rx_ring) { | 1651 | if (lp->rx_ring) { |
1586 | pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, | 1652 | pci_free_consistent(lp->pci_dev, |
1587 | lp->rx_ring, lp->rx_ring_dma_addr); | 1653 | sizeof(struct pcnet32_rx_head) * |
1588 | lp->rx_ring = NULL; | 1654 | lp->rx_ring_size, lp->rx_ring, |
1589 | } | 1655 | lp->rx_ring_dma_addr); |
1656 | lp->rx_ring = NULL; | ||
1657 | } | ||
1590 | } | 1658 | } |
1591 | 1659 | ||
1592 | 1660 | static int pcnet32_open(struct net_device *dev) | |
1593 | static int | ||
1594 | pcnet32_open(struct net_device *dev) | ||
1595 | { | 1661 | { |
1596 | struct pcnet32_private *lp = dev->priv; | 1662 | struct pcnet32_private *lp = dev->priv; |
1597 | unsigned long ioaddr = dev->base_addr; | 1663 | unsigned long ioaddr = dev->base_addr; |
1598 | u16 val; | 1664 | u16 val; |
1599 | int i; | 1665 | int i; |
1600 | int rc; | 1666 | int rc; |
1601 | unsigned long flags; | 1667 | unsigned long flags; |
1602 | 1668 | ||
1603 | if (request_irq(dev->irq, &pcnet32_interrupt, | 1669 | if (request_irq(dev->irq, &pcnet32_interrupt, |
1604 | lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) { | 1670 | lp->shared_irq ? SA_SHIRQ : 0, dev->name, |
1605 | return -EAGAIN; | 1671 | (void *)dev)) { |
1606 | } | 1672 | return -EAGAIN; |
1607 | 1673 | } | |
1608 | spin_lock_irqsave(&lp->lock, flags); | 1674 | |
1609 | /* Check for a valid station address */ | 1675 | spin_lock_irqsave(&lp->lock, flags); |
1610 | if (!is_valid_ether_addr(dev->dev_addr)) { | 1676 | /* Check for a valid station address */ |
1611 | rc = -EINVAL; | 1677 | if (!is_valid_ether_addr(dev->dev_addr)) { |
1612 | goto err_free_irq; | 1678 | rc = -EINVAL; |
1613 | } | 1679 | goto err_free_irq; |
1614 | 1680 | } | |
1615 | /* Reset the PCNET32 */ | 1681 | |
1616 | lp->a.reset (ioaddr); | 1682 | /* Reset the PCNET32 */ |
1617 | 1683 | lp->a.reset(ioaddr); | |
1618 | /* switch pcnet32 to 32bit mode */ | 1684 | |
1619 | lp->a.write_bcr (ioaddr, 20, 2); | 1685 | /* switch pcnet32 to 32bit mode */ |
1620 | 1686 | lp->a.write_bcr(ioaddr, 20, 2); | |
1621 | if (netif_msg_ifup(lp)) | 1687 | |
1622 | printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", | 1688 | if (netif_msg_ifup(lp)) |
1623 | dev->name, dev->irq, | 1689 | printk(KERN_DEBUG |
1624 | (u32) (lp->tx_ring_dma_addr), | 1690 | "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", |
1625 | (u32) (lp->rx_ring_dma_addr), | 1691 | dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr), |
1626 | (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); | 1692 | (u32) (lp->rx_ring_dma_addr), |
1627 | 1693 | (u32) (lp->dma_addr + | |
1628 | /* set/reset autoselect bit */ | 1694 | offsetof(struct pcnet32_private, init_block))); |
1629 | val = lp->a.read_bcr (ioaddr, 2) & ~2; | 1695 | |
1630 | if (lp->options & PCNET32_PORT_ASEL) | 1696 | /* set/reset autoselect bit */ |
1631 | val |= 2; | 1697 | val = lp->a.read_bcr(ioaddr, 2) & ~2; |
1632 | lp->a.write_bcr (ioaddr, 2, val); | 1698 | if (lp->options & PCNET32_PORT_ASEL) |
1633 | |||
1634 | /* handle full duplex setting */ | ||
1635 | if (lp->mii_if.full_duplex) { | ||
1636 | val = lp->a.read_bcr (ioaddr, 9) & ~3; | ||
1637 | if (lp->options & PCNET32_PORT_FD) { | ||
1638 | val |= 1; | ||
1639 | if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) | ||
1640 | val |= 2; | 1699 | val |= 2; |
1641 | } else if (lp->options & PCNET32_PORT_ASEL) { | 1700 | lp->a.write_bcr(ioaddr, 2, val); |
1642 | /* workaround of xSeries250, turn on for 79C975 only */ | 1701 | |
1643 | i = ((lp->a.read_csr(ioaddr, 88) | | 1702 | /* handle full duplex setting */ |
1644 | (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff; | 1703 | if (lp->mii_if.full_duplex) { |
1645 | if (i == 0x2627) | 1704 | val = lp->a.read_bcr(ioaddr, 9) & ~3; |
1646 | val |= 3; | 1705 | if (lp->options & PCNET32_PORT_FD) { |
1647 | } | 1706 | val |= 1; |
1648 | lp->a.write_bcr (ioaddr, 9, val); | 1707 | if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) |
1649 | } | 1708 | val |= 2; |
1650 | 1709 | } else if (lp->options & PCNET32_PORT_ASEL) { | |
1651 | /* set/reset GPSI bit in test register */ | 1710 | /* workaround of xSeries250, turn on for 79C975 only */ |
1652 | val = lp->a.read_csr (ioaddr, 124) & ~0x10; | 1711 | i = ((lp->a.read_csr(ioaddr, 88) | |
1653 | if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) | 1712 | (lp->a. |
1654 | val |= 0x10; | 1713 | read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; |
1655 | lp->a.write_csr (ioaddr, 124, val); | 1714 | if (i == 0x2627) |
1656 | 1715 | val |= 3; | |
1657 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | 1716 | } |
1658 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | 1717 | lp->a.write_bcr(ioaddr, 9, val); |
1718 | } | ||
1719 | |||
1720 | /* set/reset GPSI bit in test register */ | ||
1721 | val = lp->a.read_csr(ioaddr, 124) & ~0x10; | ||
1722 | if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) | ||
1723 | val |= 0x10; | ||
1724 | lp->a.write_csr(ioaddr, 124, val); | ||
1725 | |||
1726 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | ||
1727 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | ||
1659 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || | 1728 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || |
1660 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { | 1729 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { |
1661 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1662 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; | ||
1663 | if (netif_msg_link(lp)) | ||
1664 | printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n", | ||
1665 | dev->name); | ||
1666 | } | ||
1667 | } | ||
1668 | if (lp->phycount < 2) { | ||
1669 | /* | ||
1670 | * 24 Jun 2004 according to AMD, in order to change the PHY, | ||
1671 | * DANAS (or DISPM for 79C976) must be set; then select the speed, | ||
1672 | * duplex, and/or enable auto negotiation, and clear DANAS | ||
1673 | */ | ||
1674 | if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { | ||
1675 | lp->a.write_bcr(ioaddr, 32, | ||
1676 | lp->a.read_bcr(ioaddr, 32) | 0x0080); | ||
1677 | /* disable Auto Negotiation, set 10Mbps, HD */ | ||
1678 | val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; | ||
1679 | if (lp->options & PCNET32_PORT_FD) | ||
1680 | val |= 0x10; | ||
1681 | if (lp->options & PCNET32_PORT_100) | ||
1682 | val |= 0x08; | ||
1683 | lp->a.write_bcr (ioaddr, 32, val); | ||
1684 | } else { | ||
1685 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1686 | lp->a.write_bcr(ioaddr, 32, | ||
1687 | lp->a.read_bcr(ioaddr, 32) | 0x0080); | ||
1688 | /* enable auto negotiate, setup, disable fd */ | ||
1689 | val = lp->a.read_bcr(ioaddr, 32) & ~0x98; | ||
1690 | val |= 0x20; | ||
1691 | lp->a.write_bcr(ioaddr, 32, val); | ||
1692 | } | ||
1693 | } | ||
1694 | } else { | ||
1695 | int first_phy = -1; | ||
1696 | u16 bmcr; | ||
1697 | u32 bcr9; | ||
1698 | struct ethtool_cmd ecmd; | ||
1699 | |||
1700 | /* | ||
1701 | * There is really no good other way to handle multiple PHYs | ||
1702 | * other than turning off all automatics | ||
1703 | */ | ||
1704 | val = lp->a.read_bcr(ioaddr, 2); | ||
1705 | lp->a.write_bcr(ioaddr, 2, val & ~2); | ||
1706 | val = lp->a.read_bcr(ioaddr, 32); | ||
1707 | lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ | ||
1708 | |||
1709 | if (!(lp->options & PCNET32_PORT_ASEL)) { | ||
1710 | /* setup ecmd */ | ||
1711 | ecmd.port = PORT_MII; | ||
1712 | ecmd.transceiver = XCVR_INTERNAL; | ||
1713 | ecmd.autoneg = AUTONEG_DISABLE; | ||
1714 | ecmd.speed = lp->options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10; | ||
1715 | bcr9 = lp->a.read_bcr(ioaddr, 9); | ||
1716 | |||
1717 | if (lp->options & PCNET32_PORT_FD) { | ||
1718 | ecmd.duplex = DUPLEX_FULL; | ||
1719 | bcr9 |= (1 << 0); | ||
1720 | } else { | ||
1721 | ecmd.duplex = DUPLEX_HALF; | ||
1722 | bcr9 |= ~(1 << 0); | ||
1723 | } | ||
1724 | lp->a.write_bcr(ioaddr, 9, bcr9); | ||
1725 | } | ||
1726 | |||
1727 | for (i=0; i<PCNET32_MAX_PHYS; i++) { | ||
1728 | if (lp->phymask & (1 << i)) { | ||
1729 | /* isolate all but the first PHY */ | ||
1730 | bmcr = mdio_read(dev, i, MII_BMCR); | ||
1731 | if (first_phy == -1) { | ||
1732 | first_phy = i; | ||
1733 | mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE); | ||
1734 | } else { | ||
1735 | mdio_write(dev, i, MII_BMCR, bmcr | BMCR_ISOLATE); | ||
1736 | } | ||
1737 | /* use mii_ethtool_sset to setup PHY */ | ||
1738 | lp->mii_if.phy_id = i; | ||
1739 | ecmd.phy_address = i; | ||
1740 | if (lp->options & PCNET32_PORT_ASEL) { | 1730 | if (lp->options & PCNET32_PORT_ASEL) { |
1741 | mii_ethtool_gset(&lp->mii_if, &ecmd); | 1731 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; |
1742 | ecmd.autoneg = AUTONEG_ENABLE; | 1732 | if (netif_msg_link(lp)) |
1733 | printk(KERN_DEBUG | ||
1734 | "%s: Setting 100Mb-Full Duplex.\n", | ||
1735 | dev->name); | ||
1736 | } | ||
1737 | } | ||
1738 | if (lp->phycount < 2) { | ||
1739 | /* | ||
1740 | * 24 Jun 2004 according to AMD, in order to change the PHY, | ||
1741 | * DANAS (or DISPM for 79C976) must be set; then select the speed, | ||
1742 | * duplex, and/or enable auto negotiation, and clear DANAS | ||
1743 | */ | ||
1744 | if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { | ||
1745 | lp->a.write_bcr(ioaddr, 32, | ||
1746 | lp->a.read_bcr(ioaddr, 32) | 0x0080); | ||
1747 | /* disable Auto Negotiation, set 10Mbps, HD */ | ||
1748 | val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; | ||
1749 | if (lp->options & PCNET32_PORT_FD) | ||
1750 | val |= 0x10; | ||
1751 | if (lp->options & PCNET32_PORT_100) | ||
1752 | val |= 0x08; | ||
1753 | lp->a.write_bcr(ioaddr, 32, val); | ||
1754 | } else { | ||
1755 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1756 | lp->a.write_bcr(ioaddr, 32, | ||
1757 | lp->a.read_bcr(ioaddr, | ||
1758 | 32) | 0x0080); | ||
1759 | /* enable auto negotiate, setup, disable fd */ | ||
1760 | val = lp->a.read_bcr(ioaddr, 32) & ~0x98; | ||
1761 | val |= 0x20; | ||
1762 | lp->a.write_bcr(ioaddr, 32, val); | ||
1763 | } | ||
1764 | } | ||
1765 | } else { | ||
1766 | int first_phy = -1; | ||
1767 | u16 bmcr; | ||
1768 | u32 bcr9; | ||
1769 | struct ethtool_cmd ecmd; | ||
1770 | |||
1771 | /* | ||
1772 | * There is really no good other way to handle multiple PHYs | ||
1773 | * other than turning off all automatics | ||
1774 | */ | ||
1775 | val = lp->a.read_bcr(ioaddr, 2); | ||
1776 | lp->a.write_bcr(ioaddr, 2, val & ~2); | ||
1777 | val = lp->a.read_bcr(ioaddr, 32); | ||
1778 | lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ | ||
1779 | |||
1780 | if (!(lp->options & PCNET32_PORT_ASEL)) { | ||
1781 | /* setup ecmd */ | ||
1782 | ecmd.port = PORT_MII; | ||
1783 | ecmd.transceiver = XCVR_INTERNAL; | ||
1784 | ecmd.autoneg = AUTONEG_DISABLE; | ||
1785 | ecmd.speed = | ||
1786 | lp-> | ||
1787 | options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10; | ||
1788 | bcr9 = lp->a.read_bcr(ioaddr, 9); | ||
1789 | |||
1790 | if (lp->options & PCNET32_PORT_FD) { | ||
1791 | ecmd.duplex = DUPLEX_FULL; | ||
1792 | bcr9 |= (1 << 0); | ||
1793 | } else { | ||
1794 | ecmd.duplex = DUPLEX_HALF; | ||
1795 | bcr9 |= ~(1 << 0); | ||
1796 | } | ||
1797 | lp->a.write_bcr(ioaddr, 9, bcr9); | ||
1743 | } | 1798 | } |
1744 | mii_ethtool_sset(&lp->mii_if, &ecmd); | 1799 | |
1745 | } | 1800 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { |
1746 | } | 1801 | if (lp->phymask & (1 << i)) { |
1747 | lp->mii_if.phy_id = first_phy; | 1802 | /* isolate all but the first PHY */ |
1748 | if (netif_msg_link(lp)) | 1803 | bmcr = mdio_read(dev, i, MII_BMCR); |
1749 | printk(KERN_INFO "%s: Using PHY number %d.\n", dev->name, first_phy); | 1804 | if (first_phy == -1) { |
1750 | } | 1805 | first_phy = i; |
1806 | mdio_write(dev, i, MII_BMCR, | ||
1807 | bmcr & ~BMCR_ISOLATE); | ||
1808 | } else { | ||
1809 | mdio_write(dev, i, MII_BMCR, | ||
1810 | bmcr | BMCR_ISOLATE); | ||
1811 | } | ||
1812 | /* use mii_ethtool_sset to setup PHY */ | ||
1813 | lp->mii_if.phy_id = i; | ||
1814 | ecmd.phy_address = i; | ||
1815 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1816 | mii_ethtool_gset(&lp->mii_if, &ecmd); | ||
1817 | ecmd.autoneg = AUTONEG_ENABLE; | ||
1818 | } | ||
1819 | mii_ethtool_sset(&lp->mii_if, &ecmd); | ||
1820 | } | ||
1821 | } | ||
1822 | lp->mii_if.phy_id = first_phy; | ||
1823 | if (netif_msg_link(lp)) | ||
1824 | printk(KERN_INFO "%s: Using PHY number %d.\n", | ||
1825 | dev->name, first_phy); | ||
1826 | } | ||
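
When more than one PHY answers on the MII bus, the block above gives up on the chip's automatics: it clears the ASEL bit in BCR2, stops the MII manager via BCR32 bit 7, then walks lp->phymask, clearing BMCR_ISOLATE on the first responding PHY and setting it on every other one before configuring the active PHY through mii_ethtool_sset(). A stand-alone sketch of the isolate-all-but-one step; the mdio_*_demo() helpers and the register array are fabricated, only the isolate bit value matches the MII definition.

#include <stdio.h>

#define MAX_PHYS     32
#define BMCR_ISOLATE 0x0400     /* MII basic-mode control register, isolate bit */

/* Toy MII register file: one BMCR word per possible PHY address. */
static unsigned short bmcr[MAX_PHYS];

static unsigned short mdio_read_demo(int phy)
{
	return bmcr[phy];
}

static void mdio_write_demo(int phy, unsigned short val)
{
	bmcr[phy] = val;
}

/* Un-isolate the first PHY present in phymask, isolate every other one. */
static int pick_one_phy(unsigned int phymask)
{
	int i, first = -1;

	for (i = 0; i < MAX_PHYS; i++) {
		unsigned short v;

		if (!(phymask & (1u << i)))
			continue;
		v = mdio_read_demo(i);
		if (first == -1) {
			first = i;
			mdio_write_demo(i, v & ~BMCR_ISOLATE);
		} else {
			mdio_write_demo(i, v | BMCR_ISOLATE);
		}
	}
	return first;
}

int main(void)
{
	printf("active PHY: %d\n", pick_one_phy((1u << 3) | (1u << 30)));
	return 0;
}
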
1751 | 1827 | ||
1752 | #ifdef DO_DXSUFLO | 1828 | #ifdef DO_DXSUFLO |
1753 | if (lp->dxsuflo) { /* Disable transmit stop on underflow */ | 1829 | if (lp->dxsuflo) { /* Disable transmit stop on underflow */ |
1754 | val = lp->a.read_csr (ioaddr, 3); | 1830 | val = lp->a.read_csr(ioaddr, 3); |
1755 | val |= 0x40; | 1831 | val |= 0x40; |
1756 | lp->a.write_csr (ioaddr, 3, val); | 1832 | lp->a.write_csr(ioaddr, 3, val); |
1757 | } | 1833 | } |
1758 | #endif | 1834 | #endif |
1759 | 1835 | ||
1760 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 1836 | lp->init_block.mode = |
1761 | pcnet32_load_multicast(dev); | 1837 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
1762 | 1838 | pcnet32_load_multicast(dev); | |
1763 | if (pcnet32_init_ring(dev)) { | 1839 | |
1764 | rc = -ENOMEM; | 1840 | if (pcnet32_init_ring(dev)) { |
1765 | goto err_free_ring; | 1841 | rc = -ENOMEM; |
1766 | } | 1842 | goto err_free_ring; |
1767 | 1843 | } | |
1768 | /* Re-initialize the PCNET32, and start it when done. */ | 1844 | |
1769 | lp->a.write_csr (ioaddr, 1, (lp->dma_addr + | 1845 | /* Re-initialize the PCNET32, and start it when done. */ |
1770 | offsetof(struct pcnet32_private, init_block)) & 0xffff); | 1846 | lp->a.write_csr(ioaddr, 1, (lp->dma_addr + |
1771 | lp->a.write_csr (ioaddr, 2, (lp->dma_addr + | 1847 | offsetof(struct pcnet32_private, |
1772 | offsetof(struct pcnet32_private, init_block)) >> 16); | 1848 | init_block)) & 0xffff); |
1773 | 1849 | lp->a.write_csr(ioaddr, 2, | |
1774 | lp->a.write_csr (ioaddr, 4, 0x0915); | 1850 | (lp->dma_addr + |
1775 | lp->a.write_csr (ioaddr, 0, 0x0001); | 1851 | offsetof(struct pcnet32_private, init_block)) >> 16); |
1776 | 1852 | ||
1777 | netif_start_queue(dev); | 1853 | lp->a.write_csr(ioaddr, 4, 0x0915); |
1778 | 1854 | lp->a.write_csr(ioaddr, 0, 0x0001); | |
1779 | /* Print the link status and start the watchdog */ | 1855 | |
1780 | pcnet32_check_media (dev, 1); | 1856 | netif_start_queue(dev); |
1781 | mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | 1857 | |
1782 | 1858 | /* Print the link status and start the watchdog */ | |
1783 | i = 0; | 1859 | pcnet32_check_media(dev, 1); |
1784 | while (i++ < 100) | 1860 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); |
1785 | if (lp->a.read_csr (ioaddr, 0) & 0x0100) | 1861 | |
1786 | break; | 1862 | i = 0; |
1787 | /* | 1863 | while (i++ < 100) |
1788 | * We used to clear the InitDone bit, 0x0100, here but Mark Stockton | 1864 | if (lp->a.read_csr(ioaddr, 0) & 0x0100) |
1789 | * reports that doing so triggers a bug in the '974. | 1865 | break; |
1790 | */ | 1866 | /* |
1791 | lp->a.write_csr (ioaddr, 0, 0x0042); | 1867 | * We used to clear the InitDone bit, 0x0100, here but Mark Stockton |
1792 | 1868 | * reports that doing so triggers a bug in the '974. | |
1793 | if (netif_msg_ifup(lp)) | 1869 | */ |
1794 | printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", | 1870 | lp->a.write_csr(ioaddr, 0, 0x0042); |
1795 | dev->name, i, (u32) (lp->dma_addr + | 1871 | |
1796 | offsetof(struct pcnet32_private, init_block)), | 1872 | if (netif_msg_ifup(lp)) |
1797 | lp->a.read_csr(ioaddr, 0)); | 1873 | printk(KERN_DEBUG |
1798 | 1874 | "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", | |
1799 | spin_unlock_irqrestore(&lp->lock, flags); | 1875 | dev->name, i, |
1800 | 1876 | (u32) (lp->dma_addr + | |
1801 | return 0; /* Always succeed */ | 1877 | offsetof(struct pcnet32_private, init_block)), |
1802 | 1878 | lp->a.read_csr(ioaddr, 0)); | |
1803 | err_free_ring: | 1879 | |
1804 | /* free any allocated skbuffs */ | 1880 | spin_unlock_irqrestore(&lp->lock, flags); |
1805 | for (i = 0; i < lp->rx_ring_size; i++) { | 1881 | |
1806 | lp->rx_ring[i].status = 0; | 1882 | return 0; /* Always succeed */ |
1807 | if (lp->rx_skbuff[i]) { | 1883 | |
1808 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, | 1884 | err_free_ring: |
1809 | PCI_DMA_FROMDEVICE); | 1885 | /* free any allocated skbuffs */ |
1810 | dev_kfree_skb(lp->rx_skbuff[i]); | 1886 | for (i = 0; i < lp->rx_ring_size; i++) { |
1811 | } | 1887 | lp->rx_ring[i].status = 0; |
1812 | lp->rx_skbuff[i] = NULL; | 1888 | if (lp->rx_skbuff[i]) { |
1813 | lp->rx_dma_addr[i] = 0; | 1889 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], |
1814 | } | 1890 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
1815 | 1891 | dev_kfree_skb(lp->rx_skbuff[i]); | |
1816 | pcnet32_free_ring(dev); | 1892 | } |
1817 | 1893 | lp->rx_skbuff[i] = NULL; | |
1818 | /* | 1894 | lp->rx_dma_addr[i] = 0; |
1819 | * Switch back to 16bit mode to avoid problems with dumb | 1895 | } |
1820 | * DOS packet driver after a warm reboot | 1896 | |
1821 | */ | 1897 | pcnet32_free_ring(dev); |
1822 | lp->a.write_bcr (ioaddr, 20, 4); | 1898 | |
1823 | 1899 | /* | |
1824 | err_free_irq: | 1900 | * Switch back to 16bit mode to avoid problems with dumb |
1825 | spin_unlock_irqrestore(&lp->lock, flags); | 1901 | * DOS packet driver after a warm reboot |
1826 | free_irq(dev->irq, dev); | 1902 | */ |
1827 | return rc; | 1903 | lp->a.write_bcr(ioaddr, 20, 4); |
1904 | |||
1905 | err_free_irq: | ||
1906 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1907 | free_irq(dev->irq, dev); | ||
1908 | return rc; | ||
1828 | } | 1909 | } |
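pcnet32_open() hands the controller the bus address of the init block by writing the low 16 bits to CSR1 and the high 16 bits to CSR2, then sets INIT (0x0001) in CSR0 and polls a bounded number of times for IDON (0x0100). A minimal sketch of that split-and-poll sequence; write_csr()/read_csr() here are illustrative stand-ins for lp->a.write_csr/read_csr, and the DMA address is a made-up example:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's CSR accessors. */
static uint16_t csr[128];
static void write_csr(int idx, uint16_t val) { csr[idx] = val; }
static uint16_t read_csr(int idx) { return csr[idx]; }

int main(void)
{
        uint32_t init_block_dma = 0x12345678;   /* example bus address */
        int i;

        /* CSR1 takes the low 16 bits, CSR2 the high 16 bits. */
        write_csr(1, init_block_dma & 0xffff);
        write_csr(2, init_block_dma >> 16);

        write_csr(4, 0x0915);   /* misc features, as in the driver */
        write_csr(0, 0x0001);   /* INIT: fetch the init block */

        /* Pretend the chip finished and raised IDON (bit 0x0100). */
        csr[0] |= 0x0100;

        /* Same bounded poll as pcnet32_open(): give up after 100 reads. */
        for (i = 0; i < 100; i++)
                if (read_csr(0) & 0x0100)
                        break;

        printf("CSR1=%#06x CSR2=%#06x IDON seen after %d polls\n",
               read_csr(1), read_csr(2), i);
        return 0;
}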
1829 | 1910 | ||
1830 | /* | 1911 | /* |
@@ -1840,722 +1921,792 @@ err_free_irq: | |||
1840 | * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com | 1921 | * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com |
1841 | */ | 1922 | */ |
1842 | 1923 | ||
1843 | static void | 1924 | static void pcnet32_purge_tx_ring(struct net_device *dev) |
1844 | pcnet32_purge_tx_ring(struct net_device *dev) | ||
1845 | { | 1925 | { |
1846 | struct pcnet32_private *lp = dev->priv; | 1926 | struct pcnet32_private *lp = dev->priv; |
1847 | int i; | 1927 | int i; |
1848 | |||
1849 | for (i = 0; i < lp->tx_ring_size; i++) { | ||
1850 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | ||
1851 | wmb(); /* Make sure adapter sees owner change */ | ||
1852 | if (lp->tx_skbuff[i]) { | ||
1853 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | ||
1854 | lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); | ||
1855 | dev_kfree_skb_any(lp->tx_skbuff[i]); | ||
1856 | } | ||
1857 | lp->tx_skbuff[i] = NULL; | ||
1858 | lp->tx_dma_addr[i] = 0; | ||
1859 | } | ||
1860 | } | ||
1861 | 1928 | ||
1929 | for (i = 0; i < lp->tx_ring_size; i++) { | ||
1930 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | ||
1931 | wmb(); /* Make sure adapter sees owner change */ | ||
1932 | if (lp->tx_skbuff[i]) { | ||
1933 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | ||
1934 | lp->tx_skbuff[i]->len, | ||
1935 | PCI_DMA_TODEVICE); | ||
1936 | dev_kfree_skb_any(lp->tx_skbuff[i]); | ||
1937 | } | ||
1938 | lp->tx_skbuff[i] = NULL; | ||
1939 | lp->tx_dma_addr[i] = 0; | ||
1940 | } | ||
1941 | } | ||
1862 | 1942 | ||
1863 | /* Initialize the PCNET32 Rx and Tx rings. */ | 1943 | /* Initialize the PCNET32 Rx and Tx rings. */ |
1864 | static int | 1944 | static int pcnet32_init_ring(struct net_device *dev) |
1865 | pcnet32_init_ring(struct net_device *dev) | ||
1866 | { | 1945 | { |
1867 | struct pcnet32_private *lp = dev->priv; | 1946 | struct pcnet32_private *lp = dev->priv; |
1868 | int i; | 1947 | int i; |
1869 | 1948 | ||
1870 | lp->tx_full = 0; | 1949 | lp->tx_full = 0; |
1871 | lp->cur_rx = lp->cur_tx = 0; | 1950 | lp->cur_rx = lp->cur_tx = 0; |
1872 | lp->dirty_rx = lp->dirty_tx = 0; | 1951 | lp->dirty_rx = lp->dirty_tx = 0; |
1873 | 1952 | ||
1874 | for (i = 0; i < lp->rx_ring_size; i++) { | 1953 | for (i = 0; i < lp->rx_ring_size; i++) { |
1875 | struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; | 1954 | struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; |
1876 | if (rx_skbuff == NULL) { | 1955 | if (rx_skbuff == NULL) { |
1877 | if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { | 1956 | if (! |
1878 | /* there is not much we can do at this point */ | 1957 | (rx_skbuff = lp->rx_skbuff[i] = |
1879 | if (pcnet32_debug & NETIF_MSG_DRV) | 1958 | dev_alloc_skb(PKT_BUF_SZ))) { |
1880 | printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", | 1959 | /* there is not much we can do at this point */ |
1881 | dev->name); | 1960 | if (pcnet32_debug & NETIF_MSG_DRV) |
1882 | return -1; | 1961 | printk(KERN_ERR |
1883 | } | 1962 | "%s: pcnet32_init_ring dev_alloc_skb failed.\n", |
1884 | skb_reserve (rx_skbuff, 2); | 1963 | dev->name); |
1885 | } | 1964 | return -1; |
1886 | 1965 | } | |
1887 | rmb(); | 1966 | skb_reserve(rx_skbuff, 2); |
1888 | if (lp->rx_dma_addr[i] == 0) | 1967 | } |
1889 | lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, | 1968 | |
1890 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | 1969 | rmb(); |
1891 | lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); | 1970 | if (lp->rx_dma_addr[i] == 0) |
1892 | lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ); | 1971 | lp->rx_dma_addr[i] = |
1893 | wmb(); /* Make sure owner changes after all others are visible */ | 1972 | pci_map_single(lp->pci_dev, rx_skbuff->data, |
1894 | lp->rx_ring[i].status = le16_to_cpu(0x8000); | 1973 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
1895 | } | 1974 | lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]); |
1896 | /* The Tx buffer address is filled in as needed, but we do need to clear | 1975 | lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); |
1897 | * the upper ownership bit. */ | 1976 | wmb(); /* Make sure owner changes after all others are visible */ |
1898 | for (i = 0; i < lp->tx_ring_size; i++) { | 1977 | lp->rx_ring[i].status = le16_to_cpu(0x8000); |
1899 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | 1978 | } |
1900 | wmb(); /* Make sure adapter sees owner change */ | 1979 | /* The Tx buffer address is filled in as needed, but we do need to clear |
1901 | lp->tx_ring[i].base = 0; | 1980 | * the upper ownership bit. */ |
1902 | lp->tx_dma_addr[i] = 0; | 1981 | for (i = 0; i < lp->tx_ring_size; i++) { |
1903 | } | 1982 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ |
1904 | 1983 | wmb(); /* Make sure adapter sees owner change */ | |
1905 | lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); | 1984 | lp->tx_ring[i].base = 0; |
1906 | for (i = 0; i < 6; i++) | 1985 | lp->tx_dma_addr[i] = 0; |
1907 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | 1986 | } |
1908 | lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); | 1987 | |
1909 | lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); | 1988 | lp->init_block.tlen_rlen = |
1910 | wmb(); /* Make sure all changes are visible */ | 1989 | le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); |
1911 | return 0; | 1990 | for (i = 0; i < 6; i++) |
1991 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | ||
1992 | lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); | ||
1993 | lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); | ||
1994 | wmb(); /* Make sure all changes are visible */ | ||
1995 | return 0; | ||
1912 | } | 1996 | } |
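pcnet32_init_ring() fills each Rx descriptor with the buffer length stored as a two's-complement negative value (2 - PKT_BUF_SZ, the LANCE/PCnet descriptor convention) and only then flips the OWN bit, 0x8000, in the status word, so the chip never sees a half-built descriptor. A small host-endian sketch of that encoding; the le16 byte-swapping is omitted and the PKT_BUF_SZ value below is assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PKT_BUF_SZ 1544         /* assumed value, for illustration only */

int main(void)
{
        /* The descriptor wants the negated usable buffer size. */
        uint16_t buf_length = (uint16_t)(2 - PKT_BUF_SZ);
        uint16_t status = 0;

        /* Driver order: fill base/length first, set OWN (0x8000) last. */
        status |= 0x8000;

        printf("buf_length field = %#06x (the chip reads it as -%d)\n",
               buf_length, (uint16_t)-buf_length);
        printf("descriptor %s owned by the chip\n",
               (status & 0x8000) ? "is" : "is not");
        return 0;
}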
1913 | 1997 | ||
1914 | /* the pcnet32 has been issued a stop or reset. Wait for the stop bit | 1998 | /* the pcnet32 has been issued a stop or reset. Wait for the stop bit |
1915 | * then flush the pending transmit operations, re-initialize the ring, | 1999 | * then flush the pending transmit operations, re-initialize the ring, |
1916 | * and tell the chip to initialize. | 2000 | * and tell the chip to initialize. |
1917 | */ | 2001 | */ |
1918 | static void | 2002 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) |
1919 | pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) | ||
1920 | { | 2003 | { |
1921 | struct pcnet32_private *lp = dev->priv; | 2004 | struct pcnet32_private *lp = dev->priv; |
1922 | unsigned long ioaddr = dev->base_addr; | 2005 | unsigned long ioaddr = dev->base_addr; |
1923 | int i; | 2006 | int i; |
1924 | 2007 | ||
1925 | /* wait for stop */ | 2008 | /* wait for stop */ |
1926 | for (i=0; i<100; i++) | 2009 | for (i = 0; i < 100; i++) |
1927 | if (lp->a.read_csr(ioaddr, 0) & 0x0004) | 2010 | if (lp->a.read_csr(ioaddr, 0) & 0x0004) |
1928 | break; | 2011 | break; |
1929 | 2012 | ||
1930 | if (i >= 100 && netif_msg_drv(lp)) | 2013 | if (i >= 100 && netif_msg_drv(lp)) |
1931 | printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n", | 2014 | printk(KERN_ERR |
1932 | dev->name); | 2015 | "%s: pcnet32_restart timed out waiting for stop.\n", |
2016 | dev->name); | ||
1933 | 2017 | ||
1934 | pcnet32_purge_tx_ring(dev); | 2018 | pcnet32_purge_tx_ring(dev); |
1935 | if (pcnet32_init_ring(dev)) | 2019 | if (pcnet32_init_ring(dev)) |
1936 | return; | 2020 | return; |
1937 | 2021 | ||
1938 | /* ReInit Ring */ | 2022 | /* ReInit Ring */ |
1939 | lp->a.write_csr (ioaddr, 0, 1); | 2023 | lp->a.write_csr(ioaddr, 0, 1); |
1940 | i = 0; | 2024 | i = 0; |
1941 | while (i++ < 1000) | 2025 | while (i++ < 1000) |
1942 | if (lp->a.read_csr (ioaddr, 0) & 0x0100) | 2026 | if (lp->a.read_csr(ioaddr, 0) & 0x0100) |
1943 | break; | 2027 | break; |
1944 | 2028 | ||
1945 | lp->a.write_csr (ioaddr, 0, csr0_bits); | 2029 | lp->a.write_csr(ioaddr, 0, csr0_bits); |
1946 | } | 2030 | } |
1947 | 2031 | ||
1948 | 2032 | static void pcnet32_tx_timeout(struct net_device *dev) | |
1949 | static void | ||
1950 | pcnet32_tx_timeout (struct net_device *dev) | ||
1951 | { | 2033 | { |
1952 | struct pcnet32_private *lp = dev->priv; | 2034 | struct pcnet32_private *lp = dev->priv; |
1953 | unsigned long ioaddr = dev->base_addr, flags; | 2035 | unsigned long ioaddr = dev->base_addr, flags; |
1954 | 2036 | ||
1955 | spin_lock_irqsave(&lp->lock, flags); | 2037 | spin_lock_irqsave(&lp->lock, flags); |
1956 | /* Transmitter timeout, serious problems. */ | 2038 | /* Transmitter timeout, serious problems. */ |
1957 | if (pcnet32_debug & NETIF_MSG_DRV) | 2039 | if (pcnet32_debug & NETIF_MSG_DRV) |
1958 | printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", | 2040 | printk(KERN_ERR |
1959 | dev->name, lp->a.read_csr(ioaddr, 0)); | 2041 | "%s: transmit timed out, status %4.4x, resetting.\n", |
1960 | lp->a.write_csr (ioaddr, 0, 0x0004); | 2042 | dev->name, lp->a.read_csr(ioaddr, 0)); |
1961 | lp->stats.tx_errors++; | 2043 | lp->a.write_csr(ioaddr, 0, 0x0004); |
1962 | if (netif_msg_tx_err(lp)) { | 2044 | lp->stats.tx_errors++; |
1963 | int i; | 2045 | if (netif_msg_tx_err(lp)) { |
1964 | printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", | 2046 | int i; |
1965 | lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", | 2047 | printk(KERN_DEBUG |
1966 | lp->cur_rx); | 2048 | " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", |
1967 | for (i = 0 ; i < lp->rx_ring_size; i++) | 2049 | lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", |
1968 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", | 2050 | lp->cur_rx); |
1969 | le32_to_cpu(lp->rx_ring[i].base), | 2051 | for (i = 0; i < lp->rx_ring_size; i++) |
1970 | (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, | 2052 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", |
1971 | le32_to_cpu(lp->rx_ring[i].msg_length), | 2053 | le32_to_cpu(lp->rx_ring[i].base), |
1972 | le16_to_cpu(lp->rx_ring[i].status)); | 2054 | (-le16_to_cpu(lp->rx_ring[i].buf_length)) & |
1973 | for (i = 0 ; i < lp->tx_ring_size; i++) | 2055 | 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), |
1974 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", | 2056 | le16_to_cpu(lp->rx_ring[i].status)); |
1975 | le32_to_cpu(lp->tx_ring[i].base), | 2057 | for (i = 0; i < lp->tx_ring_size; i++) |
1976 | (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, | 2058 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", |
1977 | le32_to_cpu(lp->tx_ring[i].misc), | 2059 | le32_to_cpu(lp->tx_ring[i].base), |
1978 | le16_to_cpu(lp->tx_ring[i].status)); | 2060 | (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, |
1979 | printk("\n"); | 2061 | le32_to_cpu(lp->tx_ring[i].misc), |
1980 | } | 2062 | le16_to_cpu(lp->tx_ring[i].status)); |
1981 | pcnet32_restart(dev, 0x0042); | 2063 | printk("\n"); |
1982 | 2064 | } | |
1983 | dev->trans_start = jiffies; | 2065 | pcnet32_restart(dev, 0x0042); |
1984 | netif_wake_queue(dev); | ||
1985 | |||
1986 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1987 | } | ||
1988 | 2066 | ||
2067 | dev->trans_start = jiffies; | ||
2068 | netif_wake_queue(dev); | ||
1989 | 2069 | ||
1990 | static int | 2070 | spin_unlock_irqrestore(&lp->lock, flags); |
1991 | pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) | 2071 | } |
2072 | |||
2073 | static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1992 | { | 2074 | { |
1993 | struct pcnet32_private *lp = dev->priv; | 2075 | struct pcnet32_private *lp = dev->priv; |
1994 | unsigned long ioaddr = dev->base_addr; | 2076 | unsigned long ioaddr = dev->base_addr; |
1995 | u16 status; | 2077 | u16 status; |
1996 | int entry; | 2078 | int entry; |
1997 | unsigned long flags; | 2079 | unsigned long flags; |
1998 | 2080 | ||
1999 | spin_lock_irqsave(&lp->lock, flags); | 2081 | spin_lock_irqsave(&lp->lock, flags); |
2000 | 2082 | ||
2001 | if (netif_msg_tx_queued(lp)) { | 2083 | if (netif_msg_tx_queued(lp)) { |
2002 | printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", | 2084 | printk(KERN_DEBUG |
2003 | dev->name, lp->a.read_csr(ioaddr, 0)); | 2085 | "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", |
2004 | } | 2086 | dev->name, lp->a.read_csr(ioaddr, 0)); |
2087 | } | ||
2005 | 2088 | ||
2006 | /* Default status -- will not enable Successful-TxDone | 2089 | /* Default status -- will not enable Successful-TxDone |
2007 | * interrupt when that option is available to us. | 2090 | * interrupt when that option is available to us. |
2008 | */ | 2091 | */ |
2009 | status = 0x8300; | 2092 | status = 0x8300; |
2010 | 2093 | ||
2011 | /* Fill in a Tx ring entry */ | 2094 | /* Fill in a Tx ring entry */ |
2012 | 2095 | ||
2013 | /* Mask to ring buffer boundary. */ | 2096 | /* Mask to ring buffer boundary. */ |
2014 | entry = lp->cur_tx & lp->tx_mod_mask; | 2097 | entry = lp->cur_tx & lp->tx_mod_mask; |
2015 | 2098 | ||
2016 | /* Caution: the write order is important here, set the status | 2099 | /* Caution: the write order is important here, set the status |
2017 | * with the "ownership" bits last. */ | 2100 | * with the "ownership" bits last. */ |
2018 | 2101 | ||
2019 | lp->tx_ring[entry].length = le16_to_cpu(-skb->len); | 2102 | lp->tx_ring[entry].length = le16_to_cpu(-skb->len); |
2020 | 2103 | ||
2021 | lp->tx_ring[entry].misc = 0x00000000; | 2104 | lp->tx_ring[entry].misc = 0x00000000; |
2022 | 2105 | ||
2023 | lp->tx_skbuff[entry] = skb; | 2106 | lp->tx_skbuff[entry] = skb; |
2024 | lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, | 2107 | lp->tx_dma_addr[entry] = |
2025 | PCI_DMA_TODEVICE); | 2108 | pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); |
2026 | lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); | 2109 | lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]); |
2027 | wmb(); /* Make sure owner changes after all others are visible */ | 2110 | wmb(); /* Make sure owner changes after all others are visible */ |
2028 | lp->tx_ring[entry].status = le16_to_cpu(status); | 2111 | lp->tx_ring[entry].status = le16_to_cpu(status); |
2029 | 2112 | ||
2030 | lp->cur_tx++; | 2113 | lp->cur_tx++; |
2031 | lp->stats.tx_bytes += skb->len; | 2114 | lp->stats.tx_bytes += skb->len; |
2032 | 2115 | ||
2033 | /* Trigger an immediate send poll. */ | 2116 | /* Trigger an immediate send poll. */ |
2034 | lp->a.write_csr (ioaddr, 0, 0x0048); | 2117 | lp->a.write_csr(ioaddr, 0, 0x0048); |
2035 | 2118 | ||
2036 | dev->trans_start = jiffies; | 2119 | dev->trans_start = jiffies; |
2037 | 2120 | ||
2038 | if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { | 2121 | if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { |
2039 | lp->tx_full = 1; | 2122 | lp->tx_full = 1; |
2040 | netif_stop_queue(dev); | 2123 | netif_stop_queue(dev); |
2041 | } | 2124 | } |
2042 | spin_unlock_irqrestore(&lp->lock, flags); | 2125 | spin_unlock_irqrestore(&lp->lock, flags); |
2043 | return 0; | 2126 | return 0; |
2044 | } | 2127 | } |
2045 | 2128 | ||
2046 | /* The PCNET32 interrupt handler. */ | 2129 | /* The PCNET32 interrupt handler. */ |
2047 | static irqreturn_t | 2130 | static irqreturn_t |
2048 | pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 2131 | pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs) |
2049 | { | 2132 | { |
2050 | struct net_device *dev = dev_id; | 2133 | struct net_device *dev = dev_id; |
2051 | struct pcnet32_private *lp; | 2134 | struct pcnet32_private *lp; |
2052 | unsigned long ioaddr; | 2135 | unsigned long ioaddr; |
2053 | u16 csr0,rap; | 2136 | u16 csr0, rap; |
2054 | int boguscnt = max_interrupt_work; | 2137 | int boguscnt = max_interrupt_work; |
2055 | int must_restart; | 2138 | int must_restart; |
2056 | 2139 | ||
2057 | if (!dev) { | 2140 | if (!dev) { |
2058 | if (pcnet32_debug & NETIF_MSG_INTR) | 2141 | if (pcnet32_debug & NETIF_MSG_INTR) |
2059 | printk (KERN_DEBUG "%s(): irq %d for unknown device\n", | 2142 | printk(KERN_DEBUG "%s(): irq %d for unknown device\n", |
2060 | __FUNCTION__, irq); | 2143 | __FUNCTION__, irq); |
2061 | return IRQ_NONE; | 2144 | return IRQ_NONE; |
2062 | } | ||
2063 | |||
2064 | ioaddr = dev->base_addr; | ||
2065 | lp = dev->priv; | ||
2066 | |||
2067 | spin_lock(&lp->lock); | ||
2068 | |||
2069 | rap = lp->a.read_rap(ioaddr); | ||
2070 | while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { | ||
2071 | if (csr0 == 0xffff) { | ||
2072 | break; /* PCMCIA remove happened */ | ||
2073 | } | 2145 | } |
2074 | /* Acknowledge all of the current interrupt sources ASAP. */ | ||
2075 | lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f); | ||
2076 | 2146 | ||
2077 | must_restart = 0; | 2147 | ioaddr = dev->base_addr; |
2148 | lp = dev->priv; | ||
2078 | 2149 | ||
2079 | if (netif_msg_intr(lp)) | 2150 | spin_lock(&lp->lock); |
2080 | printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", | 2151 | |
2081 | dev->name, csr0, lp->a.read_csr (ioaddr, 0)); | 2152 | rap = lp->a.read_rap(ioaddr); |
2082 | 2153 | while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { | |
2083 | if (csr0 & 0x0400) /* Rx interrupt */ | 2154 | if (csr0 == 0xffff) { |
2084 | pcnet32_rx(dev); | 2155 | break; /* PCMCIA remove happened */ |
2085 | 2156 | } | |
2086 | if (csr0 & 0x0200) { /* Tx-done interrupt */ | 2157 | /* Acknowledge all of the current interrupt sources ASAP. */ |
2087 | unsigned int dirty_tx = lp->dirty_tx; | 2158 | lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f); |
2088 | int delta; | 2159 | |
2089 | 2160 | must_restart = 0; | |
2090 | while (dirty_tx != lp->cur_tx) { | 2161 | |
2091 | int entry = dirty_tx & lp->tx_mod_mask; | 2162 | if (netif_msg_intr(lp)) |
2092 | int status = (short)le16_to_cpu(lp->tx_ring[entry].status); | 2163 | printk(KERN_DEBUG |
2093 | 2164 | "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", | |
2094 | if (status < 0) | 2165 | dev->name, csr0, lp->a.read_csr(ioaddr, 0)); |
2095 | break; /* It still hasn't been Txed */ | 2166 | |
2096 | 2167 | if (csr0 & 0x0400) /* Rx interrupt */ | |
2097 | lp->tx_ring[entry].base = 0; | 2168 | pcnet32_rx(dev); |
2098 | 2169 | ||
2099 | if (status & 0x4000) { | 2170 | if (csr0 & 0x0200) { /* Tx-done interrupt */ |
2100 | /* There was a major error, log it. */ | 2171 | unsigned int dirty_tx = lp->dirty_tx; |
2101 | int err_status = le32_to_cpu(lp->tx_ring[entry].misc); | 2172 | int delta; |
2102 | lp->stats.tx_errors++; | 2173 | |
2103 | if (netif_msg_tx_err(lp)) | 2174 | while (dirty_tx != lp->cur_tx) { |
2104 | printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n", | 2175 | int entry = dirty_tx & lp->tx_mod_mask; |
2105 | dev->name, status, err_status); | 2176 | int status = |
2106 | if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; | 2177 | (short)le16_to_cpu(lp->tx_ring[entry]. |
2107 | if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; | 2178 | status); |
2108 | if (err_status & 0x10000000) lp->stats.tx_window_errors++; | 2179 | |
2180 | if (status < 0) | ||
2181 | break; /* It still hasn't been Txed */ | ||
2182 | |||
2183 | lp->tx_ring[entry].base = 0; | ||
2184 | |||
2185 | if (status & 0x4000) { | ||
2186 | /* There was a major error, log it. */ | ||
2187 | int err_status = | ||
2188 | le32_to_cpu(lp->tx_ring[entry]. | ||
2189 | misc); | ||
2190 | lp->stats.tx_errors++; | ||
2191 | if (netif_msg_tx_err(lp)) | ||
2192 | printk(KERN_ERR | ||
2193 | "%s: Tx error status=%04x err_status=%08x\n", | ||
2194 | dev->name, status, | ||
2195 | err_status); | ||
2196 | if (err_status & 0x04000000) | ||
2197 | lp->stats.tx_aborted_errors++; | ||
2198 | if (err_status & 0x08000000) | ||
2199 | lp->stats.tx_carrier_errors++; | ||
2200 | if (err_status & 0x10000000) | ||
2201 | lp->stats.tx_window_errors++; | ||
2109 | #ifndef DO_DXSUFLO | 2202 | #ifndef DO_DXSUFLO |
2110 | if (err_status & 0x40000000) { | 2203 | if (err_status & 0x40000000) { |
2111 | lp->stats.tx_fifo_errors++; | 2204 | lp->stats.tx_fifo_errors++; |
2112 | /* Ackk! On FIFO errors the Tx unit is turned off! */ | 2205 | /* Ackk! On FIFO errors the Tx unit is turned off! */ |
2113 | /* Remove this verbosity later! */ | 2206 | /* Remove this verbosity later! */ |
2114 | if (netif_msg_tx_err(lp)) | 2207 | if (netif_msg_tx_err(lp)) |
2115 | printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", | 2208 | printk(KERN_ERR |
2116 | dev->name, csr0); | 2209 | "%s: Tx FIFO error! CSR0=%4.4x\n", |
2117 | must_restart = 1; | 2210 | dev->name, csr0); |
2118 | } | 2211 | must_restart = 1; |
2212 | } | ||
2119 | #else | 2213 | #else |
2120 | if (err_status & 0x40000000) { | 2214 | if (err_status & 0x40000000) { |
2121 | lp->stats.tx_fifo_errors++; | 2215 | lp->stats.tx_fifo_errors++; |
2122 | if (! lp->dxsuflo) { /* If controller doesn't recover ... */ | 2216 | if (!lp->dxsuflo) { /* If controller doesn't recover ... */ |
2123 | /* Ackk! On FIFO errors the Tx unit is turned off! */ | 2217 | /* Ackk! On FIFO errors the Tx unit is turned off! */ |
2124 | /* Remove this verbosity later! */ | 2218 | /* Remove this verbosity later! */ |
2125 | if (netif_msg_tx_err(lp)) | 2219 | if (netif_msg_tx_err |
2126 | printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", | 2220 | (lp)) |
2127 | dev->name, csr0); | 2221 | printk(KERN_ERR |
2128 | must_restart = 1; | 2222 | "%s: Tx FIFO error! CSR0=%4.4x\n", |
2129 | } | 2223 | dev-> |
2130 | } | 2224 | name, |
2225 | csr0); | ||
2226 | must_restart = 1; | ||
2227 | } | ||
2228 | } | ||
2131 | #endif | 2229 | #endif |
2132 | } else { | 2230 | } else { |
2133 | if (status & 0x1800) | 2231 | if (status & 0x1800) |
2134 | lp->stats.collisions++; | 2232 | lp->stats.collisions++; |
2135 | lp->stats.tx_packets++; | 2233 | lp->stats.tx_packets++; |
2234 | } | ||
2235 | |||
2236 | /* We must free the original skb */ | ||
2237 | if (lp->tx_skbuff[entry]) { | ||
2238 | pci_unmap_single(lp->pci_dev, | ||
2239 | lp->tx_dma_addr[entry], | ||
2240 | lp->tx_skbuff[entry]-> | ||
2241 | len, PCI_DMA_TODEVICE); | ||
2242 | dev_kfree_skb_irq(lp->tx_skbuff[entry]); | ||
2243 | lp->tx_skbuff[entry] = NULL; | ||
2244 | lp->tx_dma_addr[entry] = 0; | ||
2245 | } | ||
2246 | dirty_tx++; | ||
2247 | } | ||
2248 | |||
2249 | delta = | ||
2250 | (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + | ||
2251 | lp->tx_ring_size); | ||
2252 | if (delta > lp->tx_ring_size) { | ||
2253 | if (netif_msg_drv(lp)) | ||
2254 | printk(KERN_ERR | ||
2255 | "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", | ||
2256 | dev->name, dirty_tx, lp->cur_tx, | ||
2257 | lp->tx_full); | ||
2258 | dirty_tx += lp->tx_ring_size; | ||
2259 | delta -= lp->tx_ring_size; | ||
2260 | } | ||
2261 | |||
2262 | if (lp->tx_full && | ||
2263 | netif_queue_stopped(dev) && | ||
2264 | delta < lp->tx_ring_size - 2) { | ||
2265 | /* The ring is no longer full, clear tbusy. */ | ||
2266 | lp->tx_full = 0; | ||
2267 | netif_wake_queue(dev); | ||
2268 | } | ||
2269 | lp->dirty_tx = dirty_tx; | ||
2270 | } | ||
2271 | |||
2272 | /* Log misc errors. */ | ||
2273 | if (csr0 & 0x4000) | ||
2274 | lp->stats.tx_errors++; /* Tx babble. */ | ||
2275 | if (csr0 & 0x1000) { | ||
2276 | /* | ||
2277 | * this happens when our receive ring is full. This shouldn't | ||
2278 | * be a problem as we will see normal rx interrupts for the frames | ||
2279 | * in the receive ring. But there are some PCI chipsets (I can | ||
2280 | * reproduce this on SP3G with Intel saturn chipset) which | ||
2281 | * sometimes have problems and will fill up the receive ring with | ||
2282 | * error descriptors. In this situation we don't get a rx | ||
2283 | * interrupt, but a missed frame interrupt sooner or later. | ||
2284 | * So we try to clean up our receive ring here. | ||
2285 | */ | ||
2286 | pcnet32_rx(dev); | ||
2287 | lp->stats.rx_errors++; /* Missed a Rx frame. */ | ||
2288 | } | ||
2289 | if (csr0 & 0x0800) { | ||
2290 | if (netif_msg_drv(lp)) | ||
2291 | printk(KERN_ERR | ||
2292 | "%s: Bus master arbitration failure, status %4.4x.\n", | ||
2293 | dev->name, csr0); | ||
2294 | /* unlike for the lance, there is no restart needed */ | ||
2136 | } | 2295 | } |
2137 | 2296 | ||
2138 | /* We must free the original skb */ | 2297 | if (must_restart) { |
2139 | if (lp->tx_skbuff[entry]) { | 2298 | /* reset the chip to clear the error condition, then restart */ |
2140 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], | 2299 | lp->a.reset(ioaddr); |
2141 | lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); | 2300 | lp->a.write_csr(ioaddr, 4, 0x0915); |
2142 | dev_kfree_skb_irq(lp->tx_skbuff[entry]); | 2301 | pcnet32_restart(dev, 0x0002); |
2143 | lp->tx_skbuff[entry] = NULL; | 2302 | netif_wake_queue(dev); |
2144 | lp->tx_dma_addr[entry] = 0; | ||
2145 | } | 2303 | } |
2146 | dirty_tx++; | 2304 | } |
2147 | } | 2305 | |
2148 | 2306 | /* Set interrupt enable. */ | |
2149 | delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); | 2307 | lp->a.write_csr(ioaddr, 0, 0x0040); |
2150 | if (delta > lp->tx_ring_size) { | 2308 | lp->a.write_rap(ioaddr, rap); |
2151 | if (netif_msg_drv(lp)) | 2309 | |
2152 | printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", | 2310 | if (netif_msg_intr(lp)) |
2153 | dev->name, dirty_tx, lp->cur_tx, lp->tx_full); | 2311 | printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", |
2154 | dirty_tx += lp->tx_ring_size; | 2312 | dev->name, lp->a.read_csr(ioaddr, 0)); |
2155 | delta -= lp->tx_ring_size; | 2313 | |
2156 | } | 2314 | spin_unlock(&lp->lock); |
2157 | 2315 | ||
2158 | if (lp->tx_full && | 2316 | return IRQ_HANDLED; |
2159 | netif_queue_stopped(dev) && | ||
2160 | delta < lp->tx_ring_size - 2) { | ||
2161 | /* The ring is no longer full, clear tbusy. */ | ||
2162 | lp->tx_full = 0; | ||
2163 | netif_wake_queue (dev); | ||
2164 | } | ||
2165 | lp->dirty_tx = dirty_tx; | ||
2166 | } | ||
2167 | |||
2168 | /* Log misc errors. */ | ||
2169 | if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ | ||
2170 | if (csr0 & 0x1000) { | ||
2171 | /* | ||
2172 | * this happens when our receive ring is full. This shouldn't | ||
2173 | * be a problem as we will see normal rx interrupts for the frames | ||
2174 | * in the receive ring. But there are some PCI chipsets (I can | ||
2175 | * reproduce this on SP3G with Intel saturn chipset) which | 2306 | /* Set interrupt enable. */ |
2176 | * sometimes have problems and will fill up the receive ring with | 2307 | lp->a.write_csr(ioaddr, 0, 0x0040); |
2177 | * error descriptors. In this situation we don't get a rx | ||
2178 | * interrupt, but a missed frame interrupt sooner or later. | ||
2179 | * So we try to clean up our receive ring here. | ||
2180 | */ | ||
2181 | pcnet32_rx(dev); | ||
2182 | lp->stats.rx_errors++; /* Missed a Rx frame. */ | ||
2183 | } | ||
2184 | if (csr0 & 0x0800) { | ||
2185 | if (netif_msg_drv(lp)) | ||
2186 | printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n", | ||
2187 | dev->name, csr0); | ||
2188 | /* unlike for the lance, there is no restart needed */ | ||
2189 | } | ||
2190 | |||
2191 | if (must_restart) { | ||
2192 | /* reset the chip to clear the error condition, then restart */ | ||
2193 | lp->a.reset(ioaddr); | ||
2194 | lp->a.write_csr(ioaddr, 4, 0x0915); | ||
2195 | pcnet32_restart(dev, 0x0002); | ||
2196 | netif_wake_queue(dev); | ||
2197 | } | ||
2198 | } | ||
2199 | |||
2200 | /* Set interrupt enable. */ | ||
2201 | lp->a.write_csr (ioaddr, 0, 0x0040); | ||
2202 | lp->a.write_rap (ioaddr,rap); | ||
2203 | |||
2204 | if (netif_msg_intr(lp)) | ||
2205 | printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", | ||
2206 | dev->name, lp->a.read_csr (ioaddr, 0)); | ||
2207 | |||
2208 | spin_unlock(&lp->lock); | ||
2209 | |||
2210 | return IRQ_HANDLED; | ||
2211 | } | 2317 | } |
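The Tx-done path above computes the number of outstanding descriptors as (cur_tx - dirty_tx) masked with tx_mod_mask + tx_ring_size. Because the ring size is a power of two and tx_mod_mask is size - 1, that mask equals 2 * size - 1, so the difference is reduced modulo twice the ring size; any result larger than the ring size then flags an out-of-sync dirty pointer. A worked example with an assumed 16-entry ring (the counter values are made up for illustration):

#include <stdio.h>

int main(void)
{
        unsigned int tx_ring_size = 16;         /* assumed power-of-two ring */
        unsigned int tx_mod_mask = tx_ring_size - 1;
        unsigned int cur_tx = 37, dirty_tx = 30;        /* example counters */

        /* Same reduction as the interrupt handler: modulo 2 * ring size. */
        unsigned int delta = (cur_tx - dirty_tx) & (tx_mod_mask + tx_ring_size);

        printf("outstanding descriptors: %u\n", delta); /* prints 7 */
        if (delta > tx_ring_size)
                printf("dirty pointer out of sync\n");
        return 0;
}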
2212 | 2318 | ||
2213 | static int | 2319 | static int pcnet32_rx(struct net_device *dev) |
2214 | pcnet32_rx(struct net_device *dev) | ||
2215 | { | 2320 | { |
2216 | struct pcnet32_private *lp = dev->priv; | 2321 | struct pcnet32_private *lp = dev->priv; |
2217 | int entry = lp->cur_rx & lp->rx_mod_mask; | 2322 | int entry = lp->cur_rx & lp->rx_mod_mask; |
2218 | int boguscnt = lp->rx_ring_size / 2; | 2323 | int boguscnt = lp->rx_ring_size / 2; |
2219 | 2324 | ||
2220 | /* If we own the next entry, it's a new packet. Send it up. */ | 2325 | /* If we own the next entry, it's a new packet. Send it up. */ |
2221 | while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { | 2326 | while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { |
2222 | int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; | 2327 | int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; |
2223 | 2328 | ||
2224 | if (status != 0x03) { /* There was an error. */ | 2329 | if (status != 0x03) { /* There was an error. */ |
2225 | /* | 2330 | /* |
2226 | * There is a tricky error noted by John Murphy, | 2331 | * There is a tricky error noted by John Murphy, |
2227 | * <murf@perftech.com> to Russ Nelson: Even with full-sized | 2332 | * <murf@perftech.com> to Russ Nelson: Even with full-sized |
2228 | * buffers it's possible for a jabber packet to use two | 2333 | * buffers it's possible for a jabber packet to use two |
2229 | * buffers, with only the last correctly noting the error. | 2334 | * buffers, with only the last correctly noting the error. |
2230 | */ | 2335 | */ |
2231 | if (status & 0x01) /* Only count a general error at the */ | 2336 | if (status & 0x01) /* Only count a general error at the */ |
2232 | lp->stats.rx_errors++; /* end of a packet.*/ | 2337 | lp->stats.rx_errors++; /* end of a packet. */ |
2233 | if (status & 0x20) lp->stats.rx_frame_errors++; | 2338 | if (status & 0x20) |
2234 | if (status & 0x10) lp->stats.rx_over_errors++; | 2339 | lp->stats.rx_frame_errors++; |
2235 | if (status & 0x08) lp->stats.rx_crc_errors++; | 2340 | if (status & 0x10) |
2236 | if (status & 0x04) lp->stats.rx_fifo_errors++; | 2341 | lp->stats.rx_over_errors++; |
2237 | lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); | 2342 | if (status & 0x08) |
2238 | } else { | 2343 | lp->stats.rx_crc_errors++; |
2239 | /* Malloc up new buffer, compatible with net-2e. */ | 2344 | if (status & 0x04) |
2240 | short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4; | 2345 | lp->stats.rx_fifo_errors++; |
2241 | struct sk_buff *skb; | 2346 | lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); |
2242 | |||
2243 | /* Discard oversize frames. */ | ||
2244 | if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { | ||
2245 | if (netif_msg_drv(lp)) | ||
2246 | printk(KERN_ERR "%s: Impossible packet size %d!\n", | ||
2247 | dev->name, pkt_len); | ||
2248 | lp->stats.rx_errors++; | ||
2249 | } else if (pkt_len < 60) { | ||
2250 | if (netif_msg_rx_err(lp)) | ||
2251 | printk(KERN_ERR "%s: Runt packet!\n", dev->name); | ||
2252 | lp->stats.rx_errors++; | ||
2253 | } else { | ||
2254 | int rx_in_place = 0; | ||
2255 | |||
2256 | if (pkt_len > rx_copybreak) { | ||
2257 | struct sk_buff *newskb; | ||
2258 | |||
2259 | if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { | ||
2260 | skb_reserve (newskb, 2); | ||
2261 | skb = lp->rx_skbuff[entry]; | ||
2262 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry], | ||
2263 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | ||
2264 | skb_put (skb, pkt_len); | ||
2265 | lp->rx_skbuff[entry] = newskb; | ||
2266 | newskb->dev = dev; | ||
2267 | lp->rx_dma_addr[entry] = | ||
2268 | pci_map_single(lp->pci_dev, newskb->data, | ||
2269 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | ||
2270 | lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]); | ||
2271 | rx_in_place = 1; | ||
2272 | } else | ||
2273 | skb = NULL; | ||
2274 | } else { | 2347 | } else { |
2275 | skb = dev_alloc_skb(pkt_len+2); | 2348 | /* Malloc up new buffer, compatible with net-2e. */ |
2276 | } | 2349 | short pkt_len = |
2277 | 2350 | (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) | |
2278 | if (skb == NULL) { | 2351 | - 4; |
2279 | int i; | 2352 | struct sk_buff *skb; |
2280 | if (netif_msg_drv(lp)) | 2353 | |
2281 | printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", | 2354 | /* Discard oversize frames. */ |
2282 | dev->name); | 2355 | if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { |
2283 | for (i = 0; i < lp->rx_ring_size; i++) | 2356 | if (netif_msg_drv(lp)) |
2284 | if ((short)le16_to_cpu(lp->rx_ring[(entry+i) | 2357 | printk(KERN_ERR |
2285 | & lp->rx_mod_mask].status) < 0) | 2358 | "%s: Impossible packet size %d!\n", |
2286 | break; | 2359 | dev->name, pkt_len); |
2287 | 2360 | lp->stats.rx_errors++; | |
2288 | if (i > lp->rx_ring_size -2) { | 2361 | } else if (pkt_len < 60) { |
2289 | lp->stats.rx_dropped++; | 2362 | if (netif_msg_rx_err(lp)) |
2290 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | 2363 | printk(KERN_ERR "%s: Runt packet!\n", |
2291 | wmb(); /* Make sure adapter sees owner change */ | 2364 | dev->name); |
2292 | lp->cur_rx++; | 2365 | lp->stats.rx_errors++; |
2293 | } | 2366 | } else { |
2294 | break; | 2367 | int rx_in_place = 0; |
2295 | } | 2368 | |
2296 | skb->dev = dev; | 2369 | if (pkt_len > rx_copybreak) { |
2297 | if (!rx_in_place) { | 2370 | struct sk_buff *newskb; |
2298 | skb_reserve(skb,2); /* 16 byte align */ | 2371 | |
2299 | skb_put(skb,pkt_len); /* Make room */ | 2372 | if ((newskb = |
2300 | pci_dma_sync_single_for_cpu(lp->pci_dev, | 2373 | dev_alloc_skb(PKT_BUF_SZ))) { |
2301 | lp->rx_dma_addr[entry], | 2374 | skb_reserve(newskb, 2); |
2302 | PKT_BUF_SZ-2, | 2375 | skb = lp->rx_skbuff[entry]; |
2303 | PCI_DMA_FROMDEVICE); | 2376 | pci_unmap_single(lp->pci_dev, |
2304 | eth_copy_and_sum(skb, | 2377 | lp-> |
2305 | (unsigned char *)(lp->rx_skbuff[entry]->data), | 2378 | rx_dma_addr |
2306 | pkt_len,0); | 2379 | [entry], |
2307 | pci_dma_sync_single_for_device(lp->pci_dev, | 2380 | PKT_BUF_SZ - 2, |
2308 | lp->rx_dma_addr[entry], | 2381 | PCI_DMA_FROMDEVICE); |
2309 | PKT_BUF_SZ-2, | 2382 | skb_put(skb, pkt_len); |
2310 | PCI_DMA_FROMDEVICE); | 2383 | lp->rx_skbuff[entry] = newskb; |
2384 | newskb->dev = dev; | ||
2385 | lp->rx_dma_addr[entry] = | ||
2386 | pci_map_single(lp->pci_dev, | ||
2387 | newskb->data, | ||
2388 | PKT_BUF_SZ - | ||
2389 | 2, | ||
2390 | PCI_DMA_FROMDEVICE); | ||
2391 | lp->rx_ring[entry].base = | ||
2392 | le32_to_cpu(lp-> | ||
2393 | rx_dma_addr | ||
2394 | [entry]); | ||
2395 | rx_in_place = 1; | ||
2396 | } else | ||
2397 | skb = NULL; | ||
2398 | } else { | ||
2399 | skb = dev_alloc_skb(pkt_len + 2); | ||
2400 | } | ||
2401 | |||
2402 | if (skb == NULL) { | ||
2403 | int i; | ||
2404 | if (netif_msg_drv(lp)) | ||
2405 | printk(KERN_ERR | ||
2406 | "%s: Memory squeeze, deferring packet.\n", | ||
2407 | dev->name); | ||
2408 | for (i = 0; i < lp->rx_ring_size; i++) | ||
2409 | if ((short) | ||
2410 | le16_to_cpu(lp-> | ||
2411 | rx_ring[(entry + | ||
2412 | i) | ||
2413 | & lp-> | ||
2414 | rx_mod_mask]. | ||
2415 | status) < 0) | ||
2416 | break; | ||
2417 | |||
2418 | if (i > lp->rx_ring_size - 2) { | ||
2419 | lp->stats.rx_dropped++; | ||
2420 | lp->rx_ring[entry].status |= | ||
2421 | le16_to_cpu(0x8000); | ||
2422 | wmb(); /* Make sure adapter sees owner change */ | ||
2423 | lp->cur_rx++; | ||
2424 | } | ||
2425 | break; | ||
2426 | } | ||
2427 | skb->dev = dev; | ||
2428 | if (!rx_in_place) { | ||
2429 | skb_reserve(skb, 2); /* 16 byte align */ | ||
2430 | skb_put(skb, pkt_len); /* Make room */ | ||
2431 | pci_dma_sync_single_for_cpu(lp->pci_dev, | ||
2432 | lp-> | ||
2433 | rx_dma_addr | ||
2434 | [entry], | ||
2435 | PKT_BUF_SZ - | ||
2436 | 2, | ||
2437 | PCI_DMA_FROMDEVICE); | ||
2438 | eth_copy_and_sum(skb, | ||
2439 | (unsigned char *)(lp-> | ||
2440 | rx_skbuff | ||
2441 | [entry]-> | ||
2442 | data), | ||
2443 | pkt_len, 0); | ||
2444 | pci_dma_sync_single_for_device(lp-> | ||
2445 | pci_dev, | ||
2446 | lp-> | ||
2447 | rx_dma_addr | ||
2448 | [entry], | ||
2449 | PKT_BUF_SZ | ||
2450 | - 2, | ||
2451 | PCI_DMA_FROMDEVICE); | ||
2452 | } | ||
2453 | lp->stats.rx_bytes += skb->len; | ||
2454 | skb->protocol = eth_type_trans(skb, dev); | ||
2455 | netif_rx(skb); | ||
2456 | dev->last_rx = jiffies; | ||
2457 | lp->stats.rx_packets++; | ||
2458 | } | ||
2311 | } | 2459 | } |
2312 | lp->stats.rx_bytes += skb->len; | 2460 | /* |
2313 | skb->protocol=eth_type_trans(skb,dev); | 2461 | * The docs say that the buffer length isn't touched, but Andrew Boyd |
2314 | netif_rx(skb); | 2462 | * of QNX reports that some revs of the 79C965 clear it. |
2315 | dev->last_rx = jiffies; | 2463 | */ |
2316 | lp->stats.rx_packets++; | 2464 | lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); |
2317 | } | 2465 | wmb(); /* Make sure owner changes after all others are visible */ |
2466 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | ||
2467 | entry = (++lp->cur_rx) & lp->rx_mod_mask; | ||
2468 | if (--boguscnt <= 0) | ||
2469 | break; /* don't stay in loop forever */ | ||
2318 | } | 2470 | } |
2319 | /* | 2471 | |
2320 | * The docs say that the buffer length isn't touched, but Andrew Boyd | 2472 | return 0; |
2321 | * of QNX reports that some revs of the 79C965 clear it. | ||
2322 | */ | ||
2323 | lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ); | ||
2324 | wmb(); /* Make sure owner changes after all others are visible */ | ||
2325 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | ||
2326 | entry = (++lp->cur_rx) & lp->rx_mod_mask; | ||
2327 | if (--boguscnt <= 0) break; /* don't stay in loop forever */ | ||
2328 | } | ||
2329 | |||
2330 | return 0; | ||
2331 | } | 2473 | } |
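pcnet32_rx() uses rx_copybreak (200 by default in this driver) to pick between two hand-off strategies: frames above the threshold get a freshly allocated replacement ring buffer and the filled skb is passed up directly, while smaller frames are copied into a right-sized skb so the existing ring buffer can be rearmed immediately. A trivial sketch of just that decision, with made-up frame sizes:

#include <stdio.h>

int main(void)
{
        int rx_copybreak = 200;         /* driver default threshold */
        int pkt_lens[] = { 64, 1500 };  /* example frame sizes */
        int i;

        for (i = 0; i < 2; i++) {
                if (pkt_lens[i] > rx_copybreak)
                        printf("%4d bytes: swap in a new ring buffer, pass the old skb up\n",
                               pkt_lens[i]);
                else
                        printf("%4d bytes: copy into a small skb, recycle the ring buffer\n",
                               pkt_lens[i]);
        }
        return 0;
}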
2332 | 2474 | ||
2333 | static int | 2475 | static int pcnet32_close(struct net_device *dev) |
2334 | pcnet32_close(struct net_device *dev) | ||
2335 | { | 2476 | { |
2336 | unsigned long ioaddr = dev->base_addr; | 2477 | unsigned long ioaddr = dev->base_addr; |
2337 | struct pcnet32_private *lp = dev->priv; | 2478 | struct pcnet32_private *lp = dev->priv; |
2338 | int i; | 2479 | int i; |
2339 | unsigned long flags; | 2480 | unsigned long flags; |
2340 | 2481 | ||
2341 | del_timer_sync(&lp->watchdog_timer); | 2482 | del_timer_sync(&lp->watchdog_timer); |
2342 | 2483 | ||
2343 | netif_stop_queue(dev); | 2484 | netif_stop_queue(dev); |
2344 | 2485 | ||
2345 | spin_lock_irqsave(&lp->lock, flags); | 2486 | spin_lock_irqsave(&lp->lock, flags); |
2346 | 2487 | ||
2347 | lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); | 2488 | lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); |
2348 | 2489 | ||
2349 | if (netif_msg_ifdown(lp)) | 2490 | if (netif_msg_ifdown(lp)) |
2350 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", | 2491 | printk(KERN_DEBUG |
2351 | dev->name, lp->a.read_csr (ioaddr, 0)); | 2492 | "%s: Shutting down ethercard, status was %2.2x.\n", |
2493 | dev->name, lp->a.read_csr(ioaddr, 0)); | ||
2352 | 2494 | ||
2353 | /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ | 2495 | /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ |
2354 | lp->a.write_csr (ioaddr, 0, 0x0004); | 2496 | lp->a.write_csr(ioaddr, 0, 0x0004); |
2355 | 2497 | ||
2356 | /* | 2498 | /* |
2357 | * Switch back to 16bit mode to avoid problems with dumb | 2499 | * Switch back to 16bit mode to avoid problems with dumb |
2358 | * DOS packet driver after a warm reboot | 2500 | * DOS packet driver after a warm reboot |
2359 | */ | 2501 | */ |
2360 | lp->a.write_bcr (ioaddr, 20, 4); | 2502 | lp->a.write_bcr(ioaddr, 20, 4); |
2361 | 2503 | ||
2362 | spin_unlock_irqrestore(&lp->lock, flags); | 2504 | spin_unlock_irqrestore(&lp->lock, flags); |
2363 | 2505 | ||
2364 | free_irq(dev->irq, dev); | 2506 | free_irq(dev->irq, dev); |
2365 | 2507 | ||
2366 | spin_lock_irqsave(&lp->lock, flags); | 2508 | spin_lock_irqsave(&lp->lock, flags); |
2367 | 2509 | ||
2368 | /* free all allocated skbuffs */ | 2510 | /* free all allocated skbuffs */ |
2369 | for (i = 0; i < lp->rx_ring_size; i++) { | 2511 | for (i = 0; i < lp->rx_ring_size; i++) { |
2370 | lp->rx_ring[i].status = 0; | 2512 | lp->rx_ring[i].status = 0; |
2371 | wmb(); /* Make sure adapter sees owner change */ | 2513 | wmb(); /* Make sure adapter sees owner change */ |
2372 | if (lp->rx_skbuff[i]) { | 2514 | if (lp->rx_skbuff[i]) { |
2373 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, | 2515 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], |
2374 | PCI_DMA_FROMDEVICE); | 2516 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
2375 | dev_kfree_skb(lp->rx_skbuff[i]); | 2517 | dev_kfree_skb(lp->rx_skbuff[i]); |
2518 | } | ||
2519 | lp->rx_skbuff[i] = NULL; | ||
2520 | lp->rx_dma_addr[i] = 0; | ||
2376 | } | 2521 | } |
2377 | lp->rx_skbuff[i] = NULL; | ||
2378 | lp->rx_dma_addr[i] = 0; | ||
2379 | } | ||
2380 | 2522 | ||
2381 | for (i = 0; i < lp->tx_ring_size; i++) { | 2523 | for (i = 0; i < lp->tx_ring_size; i++) { |
2382 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | 2524 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ |
2383 | wmb(); /* Make sure adapter sees owner change */ | 2525 | wmb(); /* Make sure adapter sees owner change */ |
2384 | if (lp->tx_skbuff[i]) { | 2526 | if (lp->tx_skbuff[i]) { |
2385 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | 2527 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], |
2386 | lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); | 2528 | lp->tx_skbuff[i]->len, |
2387 | dev_kfree_skb(lp->tx_skbuff[i]); | 2529 | PCI_DMA_TODEVICE); |
2530 | dev_kfree_skb(lp->tx_skbuff[i]); | ||
2531 | } | ||
2532 | lp->tx_skbuff[i] = NULL; | ||
2533 | lp->tx_dma_addr[i] = 0; | ||
2388 | } | 2534 | } |
2389 | lp->tx_skbuff[i] = NULL; | ||
2390 | lp->tx_dma_addr[i] = 0; | ||
2391 | } | ||
2392 | 2535 | ||
2393 | spin_unlock_irqrestore(&lp->lock, flags); | 2536 | spin_unlock_irqrestore(&lp->lock, flags); |
2394 | 2537 | ||
2395 | return 0; | 2538 | return 0; |
2396 | } | 2539 | } |
2397 | 2540 | ||
2398 | static struct net_device_stats * | 2541 | static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) |
2399 | pcnet32_get_stats(struct net_device *dev) | ||
2400 | { | 2542 | { |
2401 | struct pcnet32_private *lp = dev->priv; | 2543 | struct pcnet32_private *lp = dev->priv; |
2402 | unsigned long ioaddr = dev->base_addr; | 2544 | unsigned long ioaddr = dev->base_addr; |
2403 | u16 saved_addr; | 2545 | u16 saved_addr; |
2404 | unsigned long flags; | 2546 | unsigned long flags; |
2405 | 2547 | ||
2406 | spin_lock_irqsave(&lp->lock, flags); | 2548 | spin_lock_irqsave(&lp->lock, flags); |
2407 | saved_addr = lp->a.read_rap(ioaddr); | 2549 | saved_addr = lp->a.read_rap(ioaddr); |
2408 | lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); | 2550 | lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); |
2409 | lp->a.write_rap(ioaddr, saved_addr); | 2551 | lp->a.write_rap(ioaddr, saved_addr); |
2410 | spin_unlock_irqrestore(&lp->lock, flags); | 2552 | spin_unlock_irqrestore(&lp->lock, flags); |
2411 | 2553 | ||
2412 | return &lp->stats; | 2554 | return &lp->stats; |
2413 | } | 2555 | } |
2414 | 2556 | ||
2415 | /* taken from the sunlance driver, which it took from the depca driver */ | 2557 | /* taken from the sunlance driver, which it took from the depca driver */ |
2416 | static void pcnet32_load_multicast (struct net_device *dev) | 2558 | static void pcnet32_load_multicast(struct net_device *dev) |
2417 | { | 2559 | { |
2418 | struct pcnet32_private *lp = dev->priv; | 2560 | struct pcnet32_private *lp = dev->priv; |
2419 | volatile struct pcnet32_init_block *ib = &lp->init_block; | 2561 | volatile struct pcnet32_init_block *ib = &lp->init_block; |
2420 | volatile u16 *mcast_table = (u16 *)&ib->filter; | 2562 | volatile u16 *mcast_table = (u16 *)&ib->filter; |
2421 | struct dev_mc_list *dmi=dev->mc_list; | 2563 | struct dev_mc_list *dmi = dev->mc_list; |
2422 | char *addrs; | 2564 | char *addrs; |
2423 | int i; | 2565 | int i; |
2424 | u32 crc; | 2566 | u32 crc; |
2425 | 2567 | ||
2426 | /* set all multicast bits */ | 2568 | /* set all multicast bits */ |
2427 | if (dev->flags & IFF_ALLMULTI) { | 2569 | if (dev->flags & IFF_ALLMULTI) { |
2428 | ib->filter[0] = 0xffffffff; | 2570 | ib->filter[0] = 0xffffffff; |
2429 | ib->filter[1] = 0xffffffff; | 2571 | ib->filter[1] = 0xffffffff; |
2572 | return; | ||
2573 | } | ||
2574 | /* clear the multicast filter */ | ||
2575 | ib->filter[0] = 0; | ||
2576 | ib->filter[1] = 0; | ||
2577 | |||
2578 | /* Add addresses */ | ||
2579 | for (i = 0; i < dev->mc_count; i++) { | ||
2580 | addrs = dmi->dmi_addr; | ||
2581 | dmi = dmi->next; | ||
2582 | |||
2583 | /* multicast address? */ | ||
2584 | if (!(*addrs & 1)) | ||
2585 | continue; | ||
2586 | |||
2587 | crc = ether_crc_le(6, addrs); | ||
2588 | crc = crc >> 26; | ||
2589 | mcast_table[crc >> 4] = | ||
2590 | le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | | ||
2591 | (1 << (crc & 0xf))); | ||
2592 | } | ||
2430 | return; | 2593 | return; |
2431 | } | ||
2432 | /* clear the multicast filter */ | ||
2433 | ib->filter[0] = 0; | ||
2434 | ib->filter[1] = 0; | ||
2435 | |||
2436 | /* Add addresses */ | ||
2437 | for (i = 0; i < dev->mc_count; i++) { | ||
2438 | addrs = dmi->dmi_addr; | ||
2439 | dmi = dmi->next; | ||
2440 | |||
2441 | /* multicast address? */ | ||
2442 | if (!(*addrs & 1)) | ||
2443 | continue; | ||
2444 | |||
2445 | crc = ether_crc_le(6, addrs); | ||
2446 | crc = crc >> 26; | ||
2447 | mcast_table [crc >> 4] = le16_to_cpu( | ||
2448 | le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf))); | ||
2449 | } | ||
2450 | return; | ||
2451 | } | 2594 | } |
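pcnet32_load_multicast() hashes each multicast address with the little-endian Ethernet CRC and keeps the top 6 bits: the upper 2 of those select one of the four 16-bit words of the logical-address filter, the lower 4 select the bit within that word. A self-contained sketch of the same calculation; the CRC routine below mirrors the kernel's bit-at-a-time ether_crc_le() and the example address is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time little-endian Ethernet CRC, as ether_crc_le() computes it. */
static uint32_t ether_crc_le(int length, const unsigned char *data)
{
        uint32_t crc = 0xffffffff;

        while (length-- > 0) {
                unsigned char byte = *data++;
                int bit;

                for (bit = 0; bit < 8; bit++, byte >>= 1) {
                        if ((crc ^ byte) & 1)
                                crc = (crc >> 1) ^ 0xedb88320;
                        else
                                crc >>= 1;
                }
        }
        return crc;
}

int main(void)
{
        /* Arbitrary example multicast address (IPv4 all-hosts group). */
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint16_t filter[4] = { 0, 0, 0, 0 };
        uint32_t crc = ether_crc_le(6, mac) >> 26;      /* keep top 6 bits */

        filter[crc >> 4] |= 1 << (crc & 0xf);

        printf("hash=%u -> filter word %u, bit %u\n",
               (unsigned)crc, (unsigned)(crc >> 4), (unsigned)(crc & 0xf));
        printf("filter[%u] = %#06x\n", (unsigned)(crc >> 4), filter[crc >> 4]);
        return 0;
}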
2452 | 2595 | ||
2453 | |||
2454 | /* | 2596 | /* |
2455 | * Set or clear the multicast filter for this adaptor. | 2597 | * Set or clear the multicast filter for this adaptor. |
2456 | */ | 2598 | */ |
2457 | static void pcnet32_set_multicast_list(struct net_device *dev) | 2599 | static void pcnet32_set_multicast_list(struct net_device *dev) |
2458 | { | 2600 | { |
2459 | unsigned long ioaddr = dev->base_addr, flags; | 2601 | unsigned long ioaddr = dev->base_addr, flags; |
2460 | struct pcnet32_private *lp = dev->priv; | 2602 | struct pcnet32_private *lp = dev->priv; |
2461 | 2603 | ||
2462 | spin_lock_irqsave(&lp->lock, flags); | 2604 | spin_lock_irqsave(&lp->lock, flags); |
2463 | if (dev->flags&IFF_PROMISC) { | 2605 | if (dev->flags & IFF_PROMISC) { |
2464 | /* Log any net taps. */ | 2606 | /* Log any net taps. */ |
2465 | if (netif_msg_hw(lp)) | 2607 | if (netif_msg_hw(lp)) |
2466 | printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); | 2608 | printk(KERN_INFO "%s: Promiscuous mode enabled.\n", |
2467 | lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); | 2609 | dev->name); |
2468 | } else { | 2610 | lp->init_block.mode = |
2469 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 2611 | le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << |
2470 | pcnet32_load_multicast (dev); | 2612 | 7); |
2471 | } | 2613 | } else { |
2472 | 2614 | lp->init_block.mode = | |
2473 | lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ | 2615 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
2474 | pcnet32_restart(dev, 0x0042); /* Resume normal operation */ | 2616 | pcnet32_load_multicast(dev); |
2475 | netif_wake_queue(dev); | 2617 | } |
2476 | 2618 | ||
2477 | spin_unlock_irqrestore(&lp->lock, flags); | 2619 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ |
2620 | pcnet32_restart(dev, 0x0042); /* Resume normal operation */ | ||
2621 | netif_wake_queue(dev); | ||
2622 | |||
2623 | spin_unlock_irqrestore(&lp->lock, flags); | ||
2478 | } | 2624 | } |
2479 | 2625 | ||
2480 | /* This routine assumes that the lp->lock is held */ | 2626 | /* This routine assumes that the lp->lock is held */ |
2481 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num) | 2627 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num) |
2482 | { | 2628 | { |
2483 | struct pcnet32_private *lp = dev->priv; | 2629 | struct pcnet32_private *lp = dev->priv; |
2484 | unsigned long ioaddr = dev->base_addr; | 2630 | unsigned long ioaddr = dev->base_addr; |
2485 | u16 val_out; | 2631 | u16 val_out; |
2486 | 2632 | ||
2487 | if (!lp->mii) | 2633 | if (!lp->mii) |
2488 | return 0; | 2634 | return 0; |
2489 | 2635 | ||
2490 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); | 2636 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); |
2491 | val_out = lp->a.read_bcr(ioaddr, 34); | 2637 | val_out = lp->a.read_bcr(ioaddr, 34); |
2492 | 2638 | ||
2493 | return val_out; | 2639 | return val_out; |
2494 | } | 2640 | } |
2495 | 2641 | ||
2496 | /* This routine assumes that the lp->lock is held */ | 2642 | /* This routine assumes that the lp->lock is held */ |
2497 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) | 2643 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) |
2498 | { | 2644 | { |
2499 | struct pcnet32_private *lp = dev->priv; | 2645 | struct pcnet32_private *lp = dev->priv; |
2500 | unsigned long ioaddr = dev->base_addr; | 2646 | unsigned long ioaddr = dev->base_addr; |
2501 | 2647 | ||
2502 | if (!lp->mii) | 2648 | if (!lp->mii) |
2503 | return; | 2649 | return; |
2504 | 2650 | ||
2505 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); | 2651 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); |
2506 | lp->a.write_bcr(ioaddr, 34, val); | 2652 | lp->a.write_bcr(ioaddr, 34, val); |
2507 | } | 2653 | } |
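mdio_read() and mdio_write() reach the MII registers indirectly: BCR33 is loaded with the PHY id in bits 9..5 and the register number in bits 4..0, and BCR34 then acts as the data window for the read or write. A tiny sketch of the address-packing step only; write_bcr() is an illustrative stand-in for lp->a.write_bcr and performs no hardware access:

#include <stdio.h>

/* Stand-in for lp->a.write_bcr(); real hardware access is not modelled. */
static void write_bcr(int bcr, unsigned int val)
{
        printf("BCR%d <- %#06x\n", bcr, val);
}

int main(void)
{
        int phy_id = 1, reg_num = 1;    /* e.g. MII_BMSR (reg 1) on the PHY at address 1 */

        /* Same packing as mdio_read()/mdio_write(): phy in bits 9..5, reg in 4..0. */
        write_bcr(33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));

        /* A read would now fetch BCR34; a write would store its value there. */
        return 0;
}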
2508 | 2654 | ||
2509 | static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 2655 | static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2510 | { | 2656 | { |
2511 | struct pcnet32_private *lp = dev->priv; | 2657 | struct pcnet32_private *lp = dev->priv; |
2512 | int rc; | 2658 | int rc; |
2513 | unsigned long flags; | 2659 | unsigned long flags; |
2514 | 2660 | ||
2515 | /* SIOC[GS]MIIxxx ioctls */ | 2661 | /* SIOC[GS]MIIxxx ioctls */ |
2516 | if (lp->mii) { | 2662 | if (lp->mii) { |
2517 | spin_lock_irqsave(&lp->lock, flags); | 2663 | spin_lock_irqsave(&lp->lock, flags); |
2518 | rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); | 2664 | rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); |
2519 | spin_unlock_irqrestore(&lp->lock, flags); | 2665 | spin_unlock_irqrestore(&lp->lock, flags); |
2520 | } else { | 2666 | } else { |
2521 | rc = -EOPNOTSUPP; | 2667 | rc = -EOPNOTSUPP; |
2522 | } | 2668 | } |
2523 | 2669 | ||
2524 | return rc; | 2670 | return rc; |
2525 | } | 2671 | } |
2526 | 2672 | ||
2527 | static int pcnet32_check_otherphy(struct net_device *dev) | 2673 | static int pcnet32_check_otherphy(struct net_device *dev) |
2528 | { | 2674 | { |
2529 | struct pcnet32_private *lp = dev->priv; | 2675 | struct pcnet32_private *lp = dev->priv; |
2530 | struct mii_if_info mii = lp->mii_if; | 2676 | struct mii_if_info mii = lp->mii_if; |
2531 | u16 bmcr; | 2677 | u16 bmcr; |
2532 | int i; | 2678 | int i; |
2533 | |||
2534 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { | ||
2535 | if (i == lp->mii_if.phy_id) | ||
2536 | continue; /* skip active phy */ | ||
2537 | if (lp->phymask & (1 << i)) { | ||
2538 | mii.phy_id = i; | ||
2539 | if (mii_link_ok(&mii)) { | ||
2540 | /* found PHY with active link */ | ||
2541 | if (netif_msg_link(lp)) | ||
2542 | printk(KERN_INFO "%s: Using PHY number %d.\n", dev->name, i); | ||
2543 | |||
2544 | /* isolate inactive phy */ | ||
2545 | bmcr = mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); | ||
2546 | mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, bmcr | BMCR_ISOLATE); | ||
2547 | |||
2548 | /* de-isolate new phy */ | ||
2549 | bmcr = mdio_read(dev, i, MII_BMCR); | ||
2550 | mdio_write(dev, i, MII_BMCR, bmcr & ~BMCR_ISOLATE); | ||
2551 | 2679 | ||
2552 | /* set new phy address */ | 2680 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { |
2553 | lp->mii_if.phy_id = i; | 2681 | if (i == lp->mii_if.phy_id) |
2554 | return 1; | 2682 | continue; /* skip active phy */ |
2555 | } | 2683 | if (lp->phymask & (1 << i)) { |
2684 | mii.phy_id = i; | ||
2685 | if (mii_link_ok(&mii)) { | ||
2686 | /* found PHY with active link */ | ||
2687 | if (netif_msg_link(lp)) | ||
2688 | printk(KERN_INFO | ||
2689 | "%s: Using PHY number %d.\n", | ||
2690 | dev->name, i); | ||
2691 | |||
2692 | /* isolate inactive phy */ | ||
2693 | bmcr = | ||
2694 | mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); | ||
2695 | mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, | ||
2696 | bmcr | BMCR_ISOLATE); | ||
2697 | |||
2698 | /* de-isolate new phy */ | ||
2699 | bmcr = mdio_read(dev, i, MII_BMCR); | ||
2700 | mdio_write(dev, i, MII_BMCR, | ||
2701 | bmcr & ~BMCR_ISOLATE); | ||
2702 | |||
2703 | /* set new phy address */ | ||
2704 | lp->mii_if.phy_id = i; | ||
2705 | return 1; | ||
2706 | } | ||
2707 | } | ||
2556 | } | 2708 | } |
2557 | } | 2709 | return 0; |
2558 | return 0; | ||
2559 | } | 2710 | } |
2560 | 2711 | ||
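pcnet32_check_otherphy() is the multi-PHY failover path: it scans lp->phymask for another PHY whose link is up, isolates the previously active PHY by setting BMCR_ISOLATE, clears that bit on the replacement, and records the new phy_id. A compilable sketch of just that selection logic, with hypothetical MDIO stubs and a fake link table standing in for mii_link_ok(), looks like this:

/* Minimal sketch of the failover logic in pcnet32_check_otherphy().
 * bmcr[], link_up[] and the mdio helpers are test scaffolding only. */
#include <stdio.h>
#include <stdint.h>

#define MAX_PHYS     32
#define MII_BMCR     0x00
#define BMCR_ISOLATE 0x0400

static uint16_t bmcr[MAX_PHYS];
static int link_up[MAX_PHYS];                  /* pretend link status */

static uint16_t mdio_read(int phy, int reg)  { (void)reg; return bmcr[phy]; }
static void mdio_write(int phy, int reg, uint16_t v) { (void)reg; bmcr[phy] = v; }

static int check_otherphy(uint32_t phymask, int *active_phy)
{
	int i;

	for (i = 0; i < MAX_PHYS; i++) {
		if (i == *active_phy || !(phymask & (1u << i)))
			continue;               /* skip active/absent PHYs */
		if (!link_up[i])
			continue;
		/* isolate the old PHY, de-isolate the new one */
		mdio_write(*active_phy, MII_BMCR,
			   mdio_read(*active_phy, MII_BMCR) | BMCR_ISOLATE);
		mdio_write(i, MII_BMCR,
			   mdio_read(i, MII_BMCR) & ~BMCR_ISOLATE);
		*active_phy = i;
		return 1;
	}
	return 0;
}

int main(void)
{
	int active = 1;
	uint32_t phymask = (1 << 1) | (1 << 3);

	link_up[3] = 1;                         /* only PHY 3 has link */
	if (check_otherphy(phymask, &active))
		printf("switched to PHY %d, old PHY isolated: %s\n", active,
		       (bmcr[1] & BMCR_ISOLATE) ? "yes" : "no");
	return 0;
}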
2561 | /* | 2712 | /* |
@@ -2568,51 +2719,53 @@ static int pcnet32_check_otherphy(struct net_device *dev) | |||
2568 | 2719 | ||
2569 | static void pcnet32_check_media(struct net_device *dev, int verbose) | 2720 | static void pcnet32_check_media(struct net_device *dev, int verbose) |
2570 | { | 2721 | { |
2571 | struct pcnet32_private *lp = dev->priv; | 2722 | struct pcnet32_private *lp = dev->priv; |
2572 | int curr_link; | 2723 | int curr_link; |
2573 | int prev_link = netif_carrier_ok(dev) ? 1 : 0; | 2724 | int prev_link = netif_carrier_ok(dev) ? 1 : 0; |
2574 | u32 bcr9; | 2725 | u32 bcr9; |
2575 | 2726 | ||
2576 | if (lp->mii) { | ||
2577 | curr_link = mii_link_ok(&lp->mii_if); | ||
2578 | } else { | ||
2579 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | ||
2580 | curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | ||
2581 | } | ||
2582 | if (!curr_link) { | ||
2583 | if (prev_link || verbose) { | ||
2584 | netif_carrier_off(dev); | ||
2585 | if (netif_msg_link(lp)) | ||
2586 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
2587 | } | ||
2588 | if (lp->phycount > 1) { | ||
2589 | curr_link = pcnet32_check_otherphy(dev); | ||
2590 | prev_link = 0; | ||
2591 | } | ||
2592 | } else if (verbose || !prev_link) { | ||
2593 | netif_carrier_on(dev); | ||
2594 | if (lp->mii) { | 2727 | if (lp->mii) { |
2595 | if (netif_msg_link(lp)) { | 2728 | curr_link = mii_link_ok(&lp->mii_if); |
2596 | struct ethtool_cmd ecmd; | ||
2597 | mii_ethtool_gset(&lp->mii_if, &ecmd); | ||
2598 | printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n", | ||
2599 | dev->name, | ||
2600 | (ecmd.speed == SPEED_100) ? "100" : "10", | ||
2601 | (ecmd.duplex == DUPLEX_FULL) ? "full" : "half"); | ||
2602 | } | ||
2603 | bcr9 = lp->a.read_bcr(dev->base_addr, 9); | ||
2604 | if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { | ||
2605 | if (lp->mii_if.full_duplex) | ||
2606 | bcr9 |= (1 << 0); | ||
2607 | else | ||
2608 | bcr9 &= ~(1 << 0); | ||
2609 | lp->a.write_bcr(dev->base_addr, 9, bcr9); | ||
2610 | } | ||
2611 | } else { | 2729 | } else { |
2612 | if (netif_msg_link(lp)) | 2730 | ulong ioaddr = dev->base_addr; /* card base I/O address */ |
2613 | printk(KERN_INFO "%s: link up\n", dev->name); | 2731 | curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); |
2732 | } | ||
2733 | if (!curr_link) { | ||
2734 | if (prev_link || verbose) { | ||
2735 | netif_carrier_off(dev); | ||
2736 | if (netif_msg_link(lp)) | ||
2737 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
2738 | } | ||
2739 | if (lp->phycount > 1) { | ||
2740 | curr_link = pcnet32_check_otherphy(dev); | ||
2741 | prev_link = 0; | ||
2742 | } | ||
2743 | } else if (verbose || !prev_link) { | ||
2744 | netif_carrier_on(dev); | ||
2745 | if (lp->mii) { | ||
2746 | if (netif_msg_link(lp)) { | ||
2747 | struct ethtool_cmd ecmd; | ||
2748 | mii_ethtool_gset(&lp->mii_if, &ecmd); | ||
2749 | printk(KERN_INFO | ||
2750 | "%s: link up, %sMbps, %s-duplex\n", | ||
2751 | dev->name, | ||
2752 | (ecmd.speed == SPEED_100) ? "100" : "10", | ||
2753 | (ecmd.duplex == | ||
2754 | DUPLEX_FULL) ? "full" : "half"); | ||
2755 | } | ||
2756 | bcr9 = lp->a.read_bcr(dev->base_addr, 9); | ||
2757 | if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { | ||
2758 | if (lp->mii_if.full_duplex) | ||
2759 | bcr9 |= (1 << 0); | ||
2760 | else | ||
2761 | bcr9 &= ~(1 << 0); | ||
2762 | lp->a.write_bcr(dev->base_addr, 9, bcr9); | ||
2763 | } | ||
2764 | } else { | ||
2765 | if (netif_msg_link(lp)) | ||
2766 | printk(KERN_INFO "%s: link up\n", dev->name); | ||
2767 | } | ||
2614 | } | 2768 | } |
2615 | } | ||
2616 | } | 2769 | } |
2617 | 2770 | ||
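Besides reporting carrier transitions, pcnet32_check_media() mirrors the duplex mode resolved over MII into bit 0 of BCR9, which selects full duplex on the chip, and it only rewrites BCR9 when that bit disagrees with mii_if.full_duplex. A minimal sketch of that read-modify-write, with placeholder read_bcr/write_bcr stubs instead of the driver's accessor table:

/* Sketch of the BCR9 duplex mirroring done in pcnet32_check_media().
 * bcr9_shadow and the register stubs are placeholders only. */
#include <stdio.h>
#include <stdint.h>

static uint32_t bcr9_shadow;

static uint32_t read_bcr(unsigned reg)            { (void)reg; return bcr9_shadow; }
static void write_bcr(unsigned reg, uint32_t val) { (void)reg; bcr9_shadow = val;  }

static void sync_duplex(int full_duplex)
{
	uint32_t bcr9 = read_bcr(9);

	if ((bcr9 & 1) != (uint32_t)full_duplex) {  /* touch BCR9 only on change */
		if (full_duplex)
			bcr9 |= 1;
		else
			bcr9 &= ~1u;
		write_bcr(9, bcr9);
	}
}

int main(void)
{
	sync_duplex(1);
	printf("BCR9 after full-duplex sync: 0x%x\n", read_bcr(9));
	sync_duplex(0);
	printf("BCR9 after half-duplex sync: 0x%x\n", read_bcr(9));
	return 0;
}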
2618 | /* | 2771 | /* |
@@ -2622,39 +2775,39 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) | |||
2622 | 2775 | ||
2623 | static void pcnet32_watchdog(struct net_device *dev) | 2776 | static void pcnet32_watchdog(struct net_device *dev) |
2624 | { | 2777 | { |
2625 | struct pcnet32_private *lp = dev->priv; | 2778 | struct pcnet32_private *lp = dev->priv; |
2626 | unsigned long flags; | 2779 | unsigned long flags; |
2627 | 2780 | ||
2628 | /* Print the link status if it has changed */ | 2781 | /* Print the link status if it has changed */ |
2629 | spin_lock_irqsave(&lp->lock, flags); | 2782 | spin_lock_irqsave(&lp->lock, flags); |
2630 | pcnet32_check_media(dev, 0); | 2783 | pcnet32_check_media(dev, 0); |
2631 | spin_unlock_irqrestore(&lp->lock, flags); | 2784 | spin_unlock_irqrestore(&lp->lock, flags); |
2632 | 2785 | ||
2633 | mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | 2786 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); |
2634 | } | 2787 | } |
2635 | 2788 | ||
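The watchdog itself only takes the lock, runs the quiet (verbose = 0) media check, and re-arms its timer; PCNET32_WATCHDOG_TIMEOUT is defined elsewhere in the file, so the exact period is not shown here. As a rough user-space analog, the sketch below polls a fake link source on a fixed period and, like the verbose = 0 path, reports transitions only; the 1-second period and fake_link_ok() are invented for the example.

/* User-space analog of the periodic watchdog: poll the link and log
 * transitions only. The link source and period are assumptions. */
#include <stdio.h>
#include <unistd.h>

static int fake_link_ok(int tick) { return tick >= 3; }  /* link up at t=3 */

int main(void)
{
	int prev_link = 0, tick;

	for (tick = 0; tick < 6; tick++) {
		int curr_link = fake_link_ok(tick);

		if (curr_link != prev_link)          /* report changes only */
			printf("t=%d: link %s\n", tick, curr_link ? "up" : "down");
		prev_link = curr_link;
		sleep(1);                            /* assumed poll period */
	}
	return 0;
}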
2636 | static void __devexit pcnet32_remove_one(struct pci_dev *pdev) | 2789 | static void __devexit pcnet32_remove_one(struct pci_dev *pdev) |
2637 | { | 2790 | { |
2638 | struct net_device *dev = pci_get_drvdata(pdev); | 2791 | struct net_device *dev = pci_get_drvdata(pdev); |
2639 | 2792 | ||
2640 | if (dev) { | 2793 | if (dev) { |
2641 | struct pcnet32_private *lp = dev->priv; | 2794 | struct pcnet32_private *lp = dev->priv; |
2642 | 2795 | ||
2643 | unregister_netdev(dev); | 2796 | unregister_netdev(dev); |
2644 | pcnet32_free_ring(dev); | 2797 | pcnet32_free_ring(dev); |
2645 | release_region(dev->base_addr, PCNET32_TOTAL_SIZE); | 2798 | release_region(dev->base_addr, PCNET32_TOTAL_SIZE); |
2646 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | 2799 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); |
2647 | free_netdev(dev); | 2800 | free_netdev(dev); |
2648 | pci_disable_device(pdev); | 2801 | pci_disable_device(pdev); |
2649 | pci_set_drvdata(pdev, NULL); | 2802 | pci_set_drvdata(pdev, NULL); |
2650 | } | 2803 | } |
2651 | } | 2804 | } |
2652 | 2805 | ||
2653 | static struct pci_driver pcnet32_driver = { | 2806 | static struct pci_driver pcnet32_driver = { |
2654 | .name = DRV_NAME, | 2807 | .name = DRV_NAME, |
2655 | .probe = pcnet32_probe_pci, | 2808 | .probe = pcnet32_probe_pci, |
2656 | .remove = __devexit_p(pcnet32_remove_one), | 2809 | .remove = __devexit_p(pcnet32_remove_one), |
2657 | .id_table = pcnet32_pci_tbl, | 2810 | .id_table = pcnet32_pci_tbl, |
2658 | }; | 2811 | }; |
2659 | 2812 | ||
2660 | /* An additional parameter that may be passed in... */ | 2813 | /* An additional parameter that may be passed in... */ |
@@ -2665,9 +2818,11 @@ static int pcnet32_have_pci; | |||
2665 | module_param(debug, int, 0); | 2818 | module_param(debug, int, 0); |
2666 | MODULE_PARM_DESC(debug, DRV_NAME " debug level"); | 2819 | MODULE_PARM_DESC(debug, DRV_NAME " debug level"); |
2667 | module_param(max_interrupt_work, int, 0); | 2820 | module_param(max_interrupt_work, int, 0); |
2668 | MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); | 2821 | MODULE_PARM_DESC(max_interrupt_work, |
2822 | DRV_NAME " maximum events handled per interrupt"); | ||
2669 | module_param(rx_copybreak, int, 0); | 2823 | module_param(rx_copybreak, int, 0); |
2670 | MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); | 2824 | MODULE_PARM_DESC(rx_copybreak, |
2825 | DRV_NAME " copy breakpoint for copy-only-tiny-frames"); | ||
2671 | module_param(tx_start_pt, int, 0); | 2826 | module_param(tx_start_pt, int, 0); |
2672 | MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); | 2827 | MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); |
2673 | module_param(pcnet32vlb, int, 0); | 2828 | module_param(pcnet32vlb, int, 0); |
@@ -2678,7 +2833,9 @@ module_param_array(full_duplex, int, NULL, 0); | |||
2678 | MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); | 2833 | MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); |
2679 | /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ | 2834 | /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ |
2680 | module_param_array(homepna, int, NULL, 0); | 2835 | module_param_array(homepna, int, NULL, 0); |
2681 | MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); | 2836 | MODULE_PARM_DESC(homepna, |
2837 | DRV_NAME | ||
2838 | " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)"); | ||
2682 | 2839 | ||
2683 | MODULE_AUTHOR("Thomas Bogendoerfer"); | 2840 | MODULE_AUTHOR("Thomas Bogendoerfer"); |
2684 | MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); | 2841 | MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); |
@@ -2688,44 +2845,44 @@ MODULE_LICENSE("GPL"); | |||
2688 | 2845 | ||
2689 | static int __init pcnet32_init_module(void) | 2846 | static int __init pcnet32_init_module(void) |
2690 | { | 2847 | { |
2691 | printk(KERN_INFO "%s", version); | 2848 | printk(KERN_INFO "%s", version); |
2692 | 2849 | ||
2693 | pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); | 2850 | pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); |
2694 | 2851 | ||
2695 | if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) | 2852 | if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) |
2696 | tx_start = tx_start_pt; | 2853 | tx_start = tx_start_pt; |
2697 | 2854 | ||
2698 | /* find the PCI devices */ | 2855 | /* find the PCI devices */ |
2699 | if (!pci_module_init(&pcnet32_driver)) | 2856 | if (!pci_module_init(&pcnet32_driver)) |
2700 | pcnet32_have_pci = 1; | 2857 | pcnet32_have_pci = 1; |
2701 | 2858 | ||
2702 | /* should we find any remaining VLbus devices ? */ | 2859 | /* should we find any remaining VLbus devices ? */ |
2703 | if (pcnet32vlb) | 2860 | if (pcnet32vlb) |
2704 | pcnet32_probe_vlbus(); | 2861 | pcnet32_probe_vlbus(); |
2705 | 2862 | ||
2706 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) | 2863 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) |
2707 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); | 2864 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); |
2708 | 2865 | ||
2709 | return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; | 2866 | return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; |
2710 | } | 2867 | } |
2711 | 2868 | ||
2712 | static void __exit pcnet32_cleanup_module(void) | 2869 | static void __exit pcnet32_cleanup_module(void) |
2713 | { | 2870 | { |
2714 | struct net_device *next_dev; | 2871 | struct net_device *next_dev; |
2715 | 2872 | ||
2716 | while (pcnet32_dev) { | 2873 | while (pcnet32_dev) { |
2717 | struct pcnet32_private *lp = pcnet32_dev->priv; | 2874 | struct pcnet32_private *lp = pcnet32_dev->priv; |
2718 | next_dev = lp->next; | 2875 | next_dev = lp->next; |
2719 | unregister_netdev(pcnet32_dev); | 2876 | unregister_netdev(pcnet32_dev); |
2720 | pcnet32_free_ring(pcnet32_dev); | 2877 | pcnet32_free_ring(pcnet32_dev); |
2721 | release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); | 2878 | release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); |
2722 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | 2879 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); |
2723 | free_netdev(pcnet32_dev); | 2880 | free_netdev(pcnet32_dev); |
2724 | pcnet32_dev = next_dev; | 2881 | pcnet32_dev = next_dev; |
2725 | } | 2882 | } |
2726 | 2883 | ||
2727 | if (pcnet32_have_pci) | 2884 | if (pcnet32_have_pci) |
2728 | pci_unregister_driver(&pcnet32_driver); | 2885 | pci_unregister_driver(&pcnet32_driver); |
2729 | } | 2886 | } |
2730 | 2887 | ||
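pcnet32_cleanup_module() tears down every probed device by walking the driver's singly linked device list (lp->next), saving the next pointer before each device is unregistered and freed, and finally unregisters the PCI driver. The sketch below reproduces only that list-walk-and-free pattern with a stand-in node type, not struct pcnet32_private:

/* Sketch of the teardown loop in pcnet32_cleanup_module(): free list
 * nodes front to back, grabbing the next pointer before each free. */
#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
	int id;
	struct fake_dev *next;
};

static struct fake_dev *dev_list;

static void cleanup_all(void)
{
	struct fake_dev *next_dev;

	while (dev_list) {
		next_dev = dev_list->next;   /* save next before freeing */
		printf("releasing device %d\n", dev_list->id);
		free(dev_list);
		dev_list = next_dev;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {            /* build a small list */
		struct fake_dev *d = malloc(sizeof(*d));

		d->id = i;
		d->next = dev_list;
		dev_list = d;
	}
	cleanup_all();
	return 0;
}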
2731 | module_init(pcnet32_init_module); | 2888 | module_init(pcnet32_init_module); |