Diffstat (limited to 'drivers/net/3c527.c')
-rw-r--r--  drivers/net/3c527.c | 526
1 file changed, 263 insertions(+), 263 deletions(-)
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 03c0f7176fc9..323b6e510108 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1,7 +1,7 @@
/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
 *
 * (c) Copyright 1998 Red Hat Software Inc
 * Written by Alan Cox.
 * Further debugging by Carl Drougge.
 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
@@ -30,12 +30,12 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.
 * The diagram (Figure 1-1) and the POS summary disagree with the
 * "Interrupt Level" section in the manual.
 *
 * The manual contradicts itself when describing the minimum number of
 * buffers in the 'configure lists' command.
 * My card accepts a buffer config of 4/4.
 *
 * Setting the SAV BP bit does not save bad packets, but
 * only enables RX on-card stats collection.
 *
 * The documentation in places seems to miss things. In actual fact
 * I've always eventually found everything is documented, it just
@@ -64,16 +64,16 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.
 * received frames exceeding a configurable length are passed
 * directly to the higher networking layers without incurring a copy,
 * in what amounts to a time/space trade-off.
 *
 * The card also keeps a large amount of statistical information
 * on-board. In a perfect world, these could be used safely at no
 * cost. However, lacking information to the contrary, processing
 * them without races would involve so much extra complexity as to
 * make it unworthwhile to do so. In the end, a hybrid SW/HW
 * implementation was made necessary --- see mc32_update_stats().
 *
 * DOC: Notes
 *
 * It should be possible to use two or more cards, but at this stage
 * only by loading two copies of the same module.
 *
@@ -132,28 +132,28 @@ static unsigned int mc32_debug = NET_DEBUG;
/* The number of low I/O ports used by the ethercard. */
#define MC32_IO_EXTENT 8

/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
#define TX_RING_LEN 32 /* Typically the card supports 37 */
#define RX_RING_LEN 8  /*     "       "       "    */

/* Copy break point, see above for details.
 * Setting to > 1512 effectively disables this feature. */
#define RX_COPYBREAK 200 /* Value from 3c59x.c */

/* Issue the 82586 workaround command - this is for "busy lans", but
 * basically means for all lans now days - has a performance (latency)
 * cost, but best set. */
static const int WORKAROUND_82586=1;

/* Pointers to buffers and their on-card records */
struct mc32_ring_desc
{
        volatile struct skb_header *p;
        struct sk_buff *skb;
};

/* Information that needs to be kept for each board. */
struct mc32_local
{
        int slot;

@@ -165,7 +165,7 @@ struct mc32_local
        volatile struct mc32_stats *stats; /* Start of on-card statistics */
        u16 tx_chain;           /* Transmit list start offset */
        u16 rx_chain;           /* Receive list start offset */
        u16 tx_len;             /* Transmit list count */
        u16 rx_len;             /* Receive list count */

        u16 xceiver_desired_state; /* HALTED or RUNNING */
@@ -180,7 +180,7 @@ struct mc32_local
        atomic_t tx_ring_head;  /* index to tx en-queue end */
        u16 tx_ring_tail;       /* index to tx de-queue end */

        u16 rx_ring_tail;       /* index to rx de-queue end */

        struct semaphore cmd_mutex;      /* Serialises issuing of execute commands */
        struct completion execution_cmd; /* Card has completed an execute command */
@@ -204,7 +204,7 @@ static const struct mca_adapters_t mc32_adapters[] = {
};


/* Macros for ring index manipulations */
static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };

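The rx helpers above wrap a ring index with a power-of-2 mask, which is why TX_RING_LEN and RX_RING_LEN must be powers of two (for example (7+1)&7 == 0, so the index wraps back to slot 0). The transmit path later calls next_tx() the same way; a minimal sketch of that helper, assuming it simply mirrors the rx pair with the TX mask (it is not shown in this hunk):

static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };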
@@ -259,21 +259,21 @@ struct net_device *__init mc32_probe(int unit)

        SET_MODULE_OWNER(dev);

        /* Do not check any supplied i/o locations.
           POS registers usually don't fail :) */

        /* MCA cards have POS registers.
           Autodetecting MCA cards is extremely simple.
           Just search for the card. */

        for(i = 0; (mc32_adapters[i].name != NULL); i++) {
                current_mca_slot =
                        mca_find_unused_adapter(mc32_adapters[i].id, 0);

                if(current_mca_slot != MCA_NOTFOUND) {
                        if(!mc32_probe1(dev, current_mca_slot))
                        {
                                mca_set_adapter_name(current_mca_slot,
                                                mc32_adapters[i].name);
                                mca_mark_as_used(current_mca_slot);
                                err = register_netdev(dev);
@@ -284,7 +284,7 @@ struct net_device *__init mc32_probe(int unit)
                                }
                                return dev;
                        }

                }
        }
        free_netdev(dev);
@@ -298,7 +298,7 @@ struct net_device *__init mc32_probe(int unit)
 *
 * Decode the slot data and configure the card structures. Having done this we
 * can reset the card and configure it. The card does a full self test cycle
 * in firmware so we have to wait for it to return and post us either a
 * failure case or some addresses we use to find the board internals.
 */

@@ -347,7 +347,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);

        POS = mca_read_stored_pos(slot, 2);

        if(!(POS&1))
        {
                printk(" disabled.\n");
@@ -357,7 +357,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        /* Fill in the 'dev' fields. */
        dev->base_addr = mca_io_bases[(POS>>1)&7];
        dev->mem_start = mca_mem_bases[(POS>>4)&7];

        POS = mca_read_stored_pos(slot, 4);
        if(!(POS&1))
        {
@@ -366,21 +366,21 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        }

        POS = mca_read_stored_pos(slot, 5);

        i=(POS>>4)&3;
        if(i==3)
        {
                printk("invalid memory window.\n");
                return -ENODEV;
        }

        i*=16384;
        i+=16384;

        dev->mem_end=dev->mem_start + i;

        dev->irq = ((POS>>2)&3)+9;

        if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
        {
                printk("io 0x%3lX, which is busy.\n", dev->base_addr);
@@ -389,23 +389,23 @@ static int __init mc32_probe1(struct net_device *dev, int slot)

        printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
                dev->base_addr, dev->irq, dev->mem_start, i/1024);


        /* We ought to set the cache line size here.. */


        /*
         * Go PROM browsing
         */

        printk("%s: Address ", dev->name);

        /* Retrieve and print the ethernet address. */
        for (i = 0; i < 6; i++)
        {
                mca_write_pos(slot, 6, i+12);
                mca_write_pos(slot, 7, 0);

                printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3));
        }

@@ -413,12 +413,12 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        mca_write_pos(slot, 7, 0);

        POS = mca_read_stored_pos(slot, 4);

        if(POS&2)
                printk(" : BNC port selected.\n");
        else
                printk(" : AUI port selected.\n");

        POS=inb(dev->base_addr+HOST_CTRL);
        POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
        POS&=~HOST_CTRL_INTE;
@@ -428,9 +428,9 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        /* Reset off */
        POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
        outb(POS, dev->base_addr+HOST_CTRL);

        udelay(300);

        /*
         * Grab the IRQ
         */
@@ -448,14 +448,14 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        i=0;

        base = inb(dev->base_addr);

        while(base == 0xFF)
        {
                i++;
                if(i == 1000)
                {
                        printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
                        err = -ENODEV;
                        goto err_exit_irq;
                }
                udelay(1000);
@@ -470,15 +470,15 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
                                base<0x0A?" test failure":"");
                else
                        printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
                err = -ENODEV;
                goto err_exit_irq;
        }

        base=0;
        for(i=0;i<4;i++)
        {
                int n=0;

                while(!(inb(dev->base_addr+2)&(1<<5)))
                {
                        n++;
@@ -493,31 +493,31 @@ static int __init mc32_probe1(struct net_device *dev, int slot)

                base|=(inb(dev->base_addr)<<(8*i));
        }

        lp->exec_box=isa_bus_to_virt(dev->mem_start+base);

        base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];

        lp->base = dev->mem_start+base;

        lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
        lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);

        lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);

        /*
         * Descriptor chains (card relative)
         */

        lp->tx_chain = lp->exec_box->data[8];   /* Transmit list start offset */
        lp->rx_chain = lp->exec_box->data[10];  /* Receive list start offset */
        lp->tx_len   = lp->exec_box->data[9];   /* Transmit list count */
        lp->rx_len   = lp->exec_box->data[11];  /* Receive list count */

        init_MUTEX_LOCKED(&lp->cmd_mutex);
        init_completion(&lp->execution_cmd);
        init_completion(&lp->xceiver_cmd);

        printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
                dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);

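The probe code above pulls a series of offsets out of exec_box->data[]; gathered in one place as a reference, using only the indices and meanings visible in the reads above (the remaining slots are not described here):

/* exec_box->data[] usage in mc32_probe1(), as read above:
 *   data[0], data[1]  low/high 16 bits of the card-relative base address
 *   data[2]           rx_box offset from base
 *   data[3]           tx_box offset from base
 *   data[5]           on-card statistics block offset from base
 *   data[8]           transmit list start offset (tx_chain)
 *   data[9]           transmit list count        (tx_len)
 *   data[10]          receive list start offset  (rx_chain)
 *   data[11]          receive list count         (rx_len)
 *   data[12]          firmware revision
 */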
@@ -543,12 +543,12 @@ err_exit_ports:
/**
 * mc32_ready_poll - wait until we can feed it a command
 * @dev: The device to wait for
 *
 * Wait until the card becomes ready to accept a command via the
 * command register. This tells us nothing about the completion
 * status of any pending commands and takes very little time at all.
 */

static inline void mc32_ready_poll(struct net_device *dev)
{
        int ioaddr = dev->base_addr;
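The body of the poll loop falls outside this hunk; a minimal sketch of the busy-wait the comment describes, assuming a "command register ready" bit in the host status register (the names HOST_STATUS and HOST_STATUS_CRR are assumptions here, not taken from this diff):

static inline void mc32_ready_poll(struct net_device *dev)
{
        int ioaddr = dev->base_addr;

        /* Spin until the card signals it can accept another command byte. */
        while (!(inb(ioaddr + HOST_STATUS) & HOST_STATUS_CRR))
                ;
}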
@@ -608,22 +608,22 @@ static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int
 *
 * Sends exec commands in a user context. This permits us to wait around
 * for the replies and also to wait for the command buffer to complete
 * from a previous command before we execute our command. After our
 * command completes we will attempt any pending multicast reload
 * we blocked off by hogging the exec buffer.
 *
 * You feed the card a command, you wait, it interrupts, you get a
 * reply. All well and good. The complication arises because you use
 * commands for filter list changes which come in at bh level from things
 * like IPV6 group stuff.
 */

static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
        struct mc32_local *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        int ret = 0;

        down(&lp->cmd_mutex);

        /*
@@ -640,7 +640,7 @@ static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
        outb(1<<6, ioaddr+HOST_CMD);

        wait_for_completion(&lp->execution_cmd);

        if(lp->exec_box->mbox&(1<<13))
                ret = -1;

@@ -664,8 +664,8 @@ static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
 * @dev: The 3c527 card to issue the command to
 *
 * This may be called from the interrupt state, where it is used
 * to restart the rx ring if the card runs out of rx buffers.
 *
 * We must first check if it's ok to (re)start the transceiver. See
 * mc32_close for details.
 */
@@ -675,21 +675,21 @@ static void mc32_start_transceiver(struct net_device *dev) {
        struct mc32_local *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;

        /* Ignore RX overflow on device closure */
        if (lp->xceiver_desired_state==HALTED)
                return;

        /* Give the card the offset to the post-EOL-bit RX descriptor */
        mc32_ready_poll(dev);
        lp->rx_box->mbox=0;
        lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
        outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

        mc32_ready_poll(dev);
        lp->tx_box->mbox=0;
        outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */

        /* We are not interrupted on start completion */
}


@@ -703,21 +703,21 @@ static void mc32_start_transceiver(struct net_device *dev) {
 *
 * We then sleep until the card has notified us that both rx and
 * tx have been suspended.
 */

static void mc32_halt_transceiver(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;

        mc32_ready_poll(dev);
        lp->rx_box->mbox=0;
        outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
        wait_for_completion(&lp->xceiver_cmd);

        mc32_ready_poll(dev);
        lp->tx_box->mbox=0;
        outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
        wait_for_completion(&lp->xceiver_cmd);
}

@@ -741,14 +741,14 @@ static void mc32_halt_transceiver(struct net_device *dev)
 * We then set the end-of-list bit for the last entry so that the
 * card will know when it has run out of buffers.
 */

static int mc32_load_rx_ring(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        int i;
        u16 rx_base;
        volatile struct skb_header *p;

        rx_base=lp->rx_chain;

        for(i=0; i<RX_RING_LEN; i++) {
@@ -761,14 +761,14 @@ static int mc32_load_rx_ring(struct net_device *dev)
                skb_reserve(lp->rx_ring[i].skb, 18);

                p=isa_bus_to_virt(lp->base+rx_base);

                p->control=0;
                p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
                p->status=0;
                p->length=1532;

                lp->rx_ring[i].p=p;
                rx_base=p->next;
        }

        lp->rx_ring[i-1].p->control |= CONTROL_EOL;
@@ -776,14 +776,14 @@ static int mc32_load_rx_ring(struct net_device *dev)
        lp->rx_ring_tail=0;

        return 0;
}


/**
 * mc32_flush_rx_ring - free the ring of receive buffers
 * @lp: Local data of 3c527 to flush the rx ring of
 *
 * Free the buffer for each ring slot. This may be called
 * before mc32_load_rx_ring(), eg. on error in mc32_open().
 * Requires rx skb pointers to point to a valid skb, or NULL.
 */
@@ -791,16 +791,16 @@ static int mc32_load_rx_ring(struct net_device *dev)
static void mc32_flush_rx_ring(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        int i;

        for(i=0; i < RX_RING_LEN; i++)
        {
                if (lp->rx_ring[i].skb) {
                        dev_kfree_skb(lp->rx_ring[i].skb);
                        lp->rx_ring[i].skb = NULL;
                }
                lp->rx_ring[i].p=NULL;
        }
}


@@ -808,31 +808,31 @@ static void mc32_flush_rx_ring(struct net_device *dev)
 * mc32_load_tx_ring - load transmit ring
 * @dev: The 3c527 card to issue the command to
 *
 * This sets up the host transmit data-structures.
 *
 * First, we obtain from the card its current position in the tx
 * ring, so that we will know where to begin transmitting
 * packets.
 *
 * Then, we read the 'next' pointers from the on-card tx ring into
 * our tx_ring array to reduce slow shared-mem reads. Finally, we
 * initialise the tx housekeeping variables.
 *
 */

static void mc32_load_tx_ring(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        volatile struct skb_header *p;
        int i;
        u16 tx_base;

        tx_base=lp->tx_box->data[0];

        for(i=0 ; i<TX_RING_LEN ; i++)
        {
                p=isa_bus_to_virt(lp->base+tx_base);
                lp->tx_ring[i].p=p;
                lp->tx_ring[i].skb=NULL;

                tx_base=p->next;
@@ -841,10 +841,10 @@ static void mc32_load_tx_ring(struct net_device *dev)
        /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
        /* see mc32_tx_ring */

        atomic_set(&lp->tx_count, TX_RING_LEN-1);
        atomic_set(&lp->tx_ring_head, 0);
        lp->tx_ring_tail=0;
}


/**
@@ -871,11 +871,11 @@ static void mc32_flush_tx_ring(struct net_device *dev)
                }
        }

        atomic_set(&lp->tx_count, 0);
        atomic_set(&lp->tx_ring_head, 0);
        lp->tx_ring_tail=0;
}


/**
 * mc32_open - handle 'up' of card
@@ -909,7 +909,7 @@ static int mc32_open(struct net_device *dev)
        regs=inb(ioaddr+HOST_CTRL);
        regs|=HOST_CTRL_INTE;
        outb(regs, ioaddr+HOST_CTRL);

        /*
         * Allow ourselves to issue commands
         */
@@ -924,52 +924,52 @@ static int mc32_open(struct net_device *dev)
        mc32_command(dev, 4, &one, 2);

        /*
         * Poke it to make sure it's really dead.
         */

        mc32_halt_transceiver(dev);
        mc32_flush_tx_ring(dev);

        /*
         * Ask card to set up on-card descriptors to our spec
         */

        if(mc32_command(dev, 8, descnumbuffs, 4)) {
                printk("%s: %s rejected our buffer configuration!\n",
                        dev->name, cardname);
                mc32_close(dev);
                return -ENOBUFS;
        }

        /* Report new configuration */
        mc32_command(dev, 6, NULL, 0);

        lp->tx_chain = lp->exec_box->data[8];   /* Transmit list start offset */
        lp->rx_chain = lp->exec_box->data[10];  /* Receive list start offset */
        lp->tx_len   = lp->exec_box->data[9];   /* Transmit list count */
        lp->rx_len   = lp->exec_box->data[11];  /* Receive list count */

        /* Set Network Address */
        mc32_command(dev, 1, dev->dev_addr, 6);

        /* Set the filters */
        mc32_set_multicast_list(dev);

        if (WORKAROUND_82586) {
                u16 zero_word=0;
                mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
        }

        mc32_load_tx_ring(dev);

        if(mc32_load_rx_ring(dev))
        {
                mc32_close(dev);
                return -ENOBUFS;
        }

        lp->xceiver_desired_state = RUNNING;

        /* And finally, set the ball rolling... */
        mc32_start_transceiver(dev);

@@ -1015,14 +1015,14 @@ static void mc32_timeout(struct net_device *dev)
 * after we've established a valid packet on the tx ring (and
 * before we let the card "see" it, to prevent it racing with the
 * irq handler).
 *
 */

static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        u32 head = atomic_read(&lp->tx_ring_head);

        volatile struct skb_header *p, *np;

        netif_stop_queue(dev);
@@ -1036,31 +1036,31 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
                return 0;
        }

        atomic_dec(&lp->tx_count);

        /* P is the last sending/sent buffer as a pointer */
        p=lp->tx_ring[head].p;

        head = next_tx(head);

        /* NP is the buffer we will be loading */
        np=lp->tx_ring[head].p;

        /* We will need this to flush the buffer out */
        lp->tx_ring[head].skb=skb;

        np->length  = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
        np->data    = isa_virt_to_bus(skb->data);
        np->status  = 0;
        np->control = CONTROL_EOP | CONTROL_EOL;
        wmb();

        /*
         * The new frame has been setup; we can now
         * let the interrupt handler and card "see" it
         */

        atomic_set(&lp->tx_ring_head, head);
        p->control &= ~CONTROL_EOL;

        netif_wake_queue(dev);
@@ -1072,13 +1072,13 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
 * mc32_update_stats - pull off the on board statistics
 * @dev: 3c527 to service
 *
 *
 * Query and reset the on-card stats. There's the small possibility
 * of a race here, which would result in an underestimation of
 * actual errors. As such, we'd prefer to keep all our stats
 * collection in software. As a rule, we do. However it can't be
 * used for rx errors and collisions as, by default, the card discards
 * bad rx packets.
 *
 * Setting the SAV BP in the rx filter command supposedly
 * stops this behaviour. However, testing shows that it only seems to
@@ -1090,30 +1090,30 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
static void mc32_update_stats(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        volatile struct mc32_stats *st = lp->stats;

        u32 rx_errors=0;

        rx_errors+=lp->net_stats.rx_crc_errors   +=st->rx_crc_errors;
        st->rx_crc_errors=0;
        rx_errors+=lp->net_stats.rx_fifo_errors  +=st->rx_overrun_errors;
        st->rx_overrun_errors=0;
        rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors;
        st->rx_alignment_errors=0;
        rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors;
        st->rx_tooshort_errors=0;
        rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
        st->rx_outofresource_errors=0;
        lp->net_stats.rx_errors=rx_errors;

        /* Number of packets which saw one collision */
        lp->net_stats.collisions+=st->dataC[10];
        st->dataC[10]=0;

        /* Number of packets which saw 2--15 collisions */
        lp->net_stats.collisions+=st->dataC[11];
        st->dataC[11]=0;
}
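Each error line above uses a chained assignment: the on-card counter is folded into the per-category software total, that updated total feeds the rx_errors aggregate, and the card's counter is then zeroed on the following line. A sketch of what one such pair expands to (hypothetical temporary added only for clarity):

        /* Expansion of: rx_errors += lp->net_stats.rx_crc_errors += st->rx_crc_errors; */
        u32 hw = st->rx_crc_errors;                /* snapshot the on-card counter        */
        lp->net_stats.rx_crc_errors += hw;         /* fold into the software running total */
        rx_errors += lp->net_stats.rx_crc_errors;  /* aggregate uses the updated total     */
        st->rx_crc_errors = 0;                     /* then reset the card's counter        */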


/**
@@ -1130,7 +1130,7 @@ static void mc32_update_stats(struct net_device *dev)
 * For each completed packet, we will either copy it and pass it up
 * the stack or, if the packet is near MTU sized, we allocate
 * another buffer and flip the old one up the stack.
 *
 * We must succeed in keeping a buffer on the ring. If necessary we
 * will toss a received packet rather than lose a ring entry. Once
 * the first uncompleted descriptor is found, we move the
@@ -1147,72 +1147,72 @@ static void mc32_rx_ring(struct net_device *dev)
        int x=0;

        rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

        do
        {
                p=lp->rx_ring[rx_ring_tail].p;

                if(!(p->status & (1<<7))) { /* Not COMPLETED */
                        break;
                }
                if(p->status & (1<<6)) /* COMPLETED_OK */
                {

                        u16 length=p->length;
                        struct sk_buff *skb;
                        struct sk_buff *newskb;

                        /* Try to save time by avoiding a copy on big frames */

                        if ((length > RX_COPYBREAK)
                            && ((newskb=dev_alloc_skb(1532)) != NULL))
                        {
                                skb=lp->rx_ring[rx_ring_tail].skb;
                                skb_put(skb, length);

                                skb_reserve(newskb,18);
                                lp->rx_ring[rx_ring_tail].skb=newskb;
                                p->data=isa_virt_to_bus(newskb->data);
                        }
                        else
                        {
                                skb=dev_alloc_skb(length+2);

                                if(skb==NULL) {
                                        lp->net_stats.rx_dropped++;
                                        goto dropped;
                                }

                                skb_reserve(skb,2);
                                memcpy(skb_put(skb, length),
                                        lp->rx_ring[rx_ring_tail].skb->data, length);
                        }

                        skb->protocol=eth_type_trans(skb,dev);
                        skb->dev=dev;
                        dev->last_rx = jiffies;
                        lp->net_stats.rx_packets++;
                        lp->net_stats.rx_bytes += length;
                        netif_rx(skb);
                }

        dropped:
                p->length = 1532;
                p->status = 0;

                rx_ring_tail=next_rx(rx_ring_tail);
        }
        while(x++<48);

        /* If there was actually a frame to be processed, place the EOL bit */
        /* at the descriptor prior to the one to be filled next */

        if (rx_ring_tail != rx_old_tail)
        {
                lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
                lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

                lp->rx_ring_tail=rx_ring_tail;
        }
}

@@ -1228,10 +1228,10 @@ static void mc32_rx_ring(struct net_device *dev)
 * any errors. This continues until the transmit ring is emptied
 * or we reach a descriptor that hasn't yet been processed by the
 * card.
 *
 */

static void mc32_tx_ring(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);
        volatile struct skb_header *np;
@@ -1243,28 +1243,28 @@ static void mc32_tx_ring(struct net_device *dev)
         * condition with 'queue full'
         */

        while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
        {
                u16 t;

                t=next_tx(lp->tx_ring_tail);
                np=lp->tx_ring[t].p;

                if(!(np->status & (1<<7)))
                {
                        /* Not COMPLETED */
                        break;
                }
                lp->net_stats.tx_packets++;
                if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
                {
                        lp->net_stats.tx_errors++;

                        switch(np->status&0x0F)
                        {
                                case 1:
                                        lp->net_stats.tx_aborted_errors++;
                                        break; /* Max collisions */
                                case 2:
                                        lp->net_stats.tx_fifo_errors++;
                                        break;
@@ -1273,10 +1273,10 @@ static void mc32_tx_ring(struct net_device *dev)
                                        break;
                                case 4:
                                        lp->net_stats.tx_window_errors++;
                                        break; /* CTS Lost */
                                case 5:
                                        lp->net_stats.tx_aborted_errors++;
                                        break; /* Transmit timeout */
                        }
                }
                /* Packets are sent in order - this is
@@ -1288,10 +1288,10 @@ static void mc32_tx_ring(struct net_device *dev)
                atomic_inc(&lp->tx_count);
                netif_wake_queue(dev);

                lp->tx_ring_tail=t;
        }

}


/**
@@ -1322,13 +1322,13 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
        struct mc32_local *lp;
        int ioaddr, status, boguscount = 0;
        int rx_event = 0;
        int tx_event = 0;

        if (dev == NULL) {
                printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
                return IRQ_NONE;
        }

        ioaddr = dev->base_addr;
        lp = netdev_priv(dev);

@@ -1338,19 +1338,19 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
        {
                status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
                printk("Status TX%d RX%d EX%d OV%d BC%d\n",
                        (status&7), (status>>3)&7, (status>>6)&1,
                        (status>>7)&1, boguscount);
#endif

                switch(status&7)
                {
                        case 0:
                                break;
                        case 6: /* TX fail */
                        case 2: /* TX ok */
                                tx_event = 1;
                                break;
                        case 3: /* Halt */
                        case 4: /* Abort */
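As the DEBUG_IRQ printk above shows, a single status byte read from HOST_CMD packs the transmit acknowledge, receive acknowledge, execute-complete and statistics-overflow indications, which the handler then peels off with right shifts. A small decode sketch using only the bit positions visible in that printk (the struct and field names are illustrative, not from the driver):

struct mc32_irq_status {
        unsigned tx_ack;        /* bits 0-2: transmit acknowledge code */
        unsigned rx_ack;        /* bits 3-5: receive acknowledge code */
        unsigned exec_done;     /* bit 6: execute command completed */
        unsigned stats_ov;      /* bit 7: an on-card statistics counter is about to overflow */
};

static inline struct mc32_irq_status mc32_decode_status(int status)
{
        struct mc32_irq_status s = {
                .tx_ack    = status & 7,
                .rx_ack    = (status >> 3) & 7,
                .exec_done = (status >> 6) & 1,
                .stats_ov  = (status >> 7) & 1,
        };
        return s;
}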
@@ -1365,7 +1365,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
                        case 0:
                                break;
                        case 2: /* RX */
                                rx_event=1;
                                break;
                        case 3: /* Halt */
                        case 4: /* Abort */
@@ -1375,12 +1375,12 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
                                /* Out of RX buffers stat */
                                /* Must restart rx */
                                lp->net_stats.rx_dropped++;
                                mc32_rx_ring(dev);
                                mc32_start_transceiver(dev);
                                break;
                        default:
                                printk("%s: strange rx ack %d\n",
                                        dev->name, status&7);
                }
                status>>=3;
                if(status&1)
@@ -1389,10 +1389,10 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
                         * No thread is waiting: we need to tidy
                         * up ourself.
                         */

                        if (lp->cmd_nonblocking) {
                                up(&lp->cmd_mutex);
                                if (lp->mc_reload_wait)
                                        mc32_reset_multicast_list(dev);
                        }
                        else complete(&lp->execution_cmd);
@@ -1401,22 +1401,22 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
                {
                        /*
                         * We get interrupted once per
                         * counter that is about to overflow.
                         */

                        mc32_update_stats(dev);
                }
        }


        /*
         * Process the transmit and receive rings
         */

        if(tx_event)
                mc32_tx_ring(dev);

        if(rx_event)
                mc32_rx_ring(dev);

        return IRQ_HANDLED;
@@ -1435,7 +1435,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 * driver. Otherwise, it is possible that the card may run out
 * of receive buffers and restart the transceiver while we're
 * trying to close it.
 *
 * We abort any receive and transmits going on and then wait until
 * any pending exec commands have completed in other code threads.
 * In theory we can't get here while that is true, in practice I am
@@ -1452,7 +1452,7 @@ static int mc32_close(struct net_device *dev)

        u8 regs;
        u16 one=1;

        lp->xceiver_desired_state = HALTED;
        netif_stop_queue(dev);

@@ -1464,22 +1464,22 @@ static int mc32_close(struct net_device *dev)

        /* Shut down the transceiver */

        mc32_halt_transceiver(dev);

        /* Ensure we issue no more commands beyond this point */

        down(&lp->cmd_mutex);

        /* Ok the card is now stopping */

        regs=inb(ioaddr+HOST_CTRL);
        regs&=~HOST_CTRL_INTE;
        outb(regs, ioaddr+HOST_CTRL);

        mc32_flush_rx_ring(dev);
        mc32_flush_tx_ring(dev);

        mc32_update_stats(dev);

        return 0;
}
@@ -1490,15 +1490,15 @@ static int mc32_close(struct net_device *dev)
 * @dev: The 3c527 card to handle
 *
 * We've collected all the stats we can in software already. Now
 * it's time to update those kept on-card and return the lot.
 *
 */

static struct net_device_stats *mc32_get_stats(struct net_device *dev)
{
        struct mc32_local *lp = netdev_priv(dev);

        mc32_update_stats(dev);
        return &lp->net_stats;
}

@@ -1506,7 +1506,7 @@ static struct net_device_stats *mc32_get_stats(struct net_device *dev)
/**
 * do_mc32_set_multicast_list - attempt to update multicasts
 * @dev: 3c527 device to load the list on
 * @retry: indicates this is not the first call.
 *
 *
 * Actually set or clear the multicast filter for this adaptor. The
@@ -1514,22 +1514,22 @@ static struct net_device_stats *mc32_get_stats(struct net_device *dev)
 * state as it may take multiple calls to get the command sequence
 * completed. We just keep trying to schedule the loads until we
 * manage to process them all.
 *
 * num_addrs == -1 Promiscuous mode, receive all packets
 *
 * num_addrs == 0  Normal mode, clear multicast list
 *
 * num_addrs > 0   Multicast mode, receive normal and MC packets,
 *                 and do best-effort filtering.
 *
 * See mc32_update_stats() regarding setting the SAV BP bit.
 *
 */

static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
        struct mc32_local *lp = netdev_priv(dev);
        u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

        if (dev->flags&IFF_PROMISC)
                /* Enable promiscuous mode */
@@ -1544,9 +1544,9 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
                unsigned char block[62];
                unsigned char *bp;
                struct dev_mc_list *dmc=dev->mc_list;

                int i;

                if(retry==0)
                        lp->mc_list_valid = 0;
                if(!lp->mc_list_valid)
@@ -1554,7 +1554,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
                        block[1]=0;
                        block[0]=dev->mc_count;
                        bp=block+2;

                        for(i=0;i<dev->mc_count;i++)
                        {
                                memcpy(bp, dmc->dmi_addr, 6);
@@ -1569,12 +1569,12 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
                        lp->mc_list_valid=1;
                }
        }

        if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
        {
                lp->mc_reload_wait = 1;
        }
        else {
                lp->mc_reload_wait = 0;
        }
}