author     Jeff Garzik <jeff@garzik.org>  2006-09-13 13:24:59 -0400
committer  Jeff Garzik <jeff@garzik.org>  2006-09-13 13:24:59 -0400
commit     6aa20a2235535605db6d6d2bd850298b2fe7f31e (patch)
tree       df0b855043407b831d57f2f2c271f8aab48444f4  /drivers/net/8390.c
parent     7a291083225af6e22ffaa46b3d91cfc1a1ccaab4 (diff)
drivers/net: Trim trailing whitespace
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/8390.c')
-rw-r--r--  drivers/net/8390.c  236
1 file changed, 118 insertions(+), 118 deletions(-)
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 3eb7048684a..5b6b05ed8f3 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1,7 +1,7 @@
/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
	Written 1992-94 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.

@@ -13,7 +13,7 @@
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This is the chip-specific code for many 8390-based ethernet adaptors.
	This is not a complete driver, it must be combined with board-specific
	code such as ne.c, wd.c, 3c503.c, etc.
@@ -27,7 +27,7 @@
	Changelog:

	Paul Gortmaker	: remove set_bit lock, other cleanups.
	Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
			  ei_block_input() for eth_io_copy_and_sum().
	Paul Gortmaker	: exchange static int ei_pingpong for a #define,
			  also add better Tx error handling.
@@ -94,9 +94,9 @@ static const char version[] =
	Read the 4 byte, page aligned 8390 header. *If* there is a
	subsequent read, it will be of the rest of the packet.
  void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
	Read COUNT bytes from the packet buffer into the skb data area. Start
	reading from RING_OFFSET, the address as the 8390 sees it. This will always
	follow the read of the 8390 header.
*/
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
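The block I/O routines documented above are not implemented in this file; each board driver supplies them, and the core reaches them through the ei_local function pointers wrapped by the macros below. As a rough, hypothetical sketch only (not code from this commit), a shared-memory board might implement block_input along these lines; the use of dev->mem_start as the buffer window and the wrap handling are assumptions for illustration:

/* Hypothetical board-level block_input(): copy 'count' bytes starting at
 * 'ring_offset' (the address as the 8390 sees it) out of a memory-mapped
 * packet buffer into the skb.  Illustrative sketch only, not from 8390.c.
 */
static void example_block_input(struct net_device *dev, int count,
				struct sk_buff *skb, int ring_offset)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *base = (void __iomem *)dev->mem_start;	/* assumed shmem window */
	unsigned long offset = ring_offset - (ei_local->rx_start_page << 8);
	unsigned long ring_size = (ei_local->stop_page - ei_local->rx_start_page) << 8;

	if (offset + count <= ring_size) {
		memcpy_fromio(skb->data, base + offset, count);
	} else {
		/* the read wraps past the end of the receive ring */
		int first = ring_size - offset;

		memcpy_fromio(skb->data, base + offset, first);
		memcpy_fromio(skb->data + first, base, count - first);
	}
}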
@@ -128,7 +128,7 @@ static void do_set_multicast_list(struct net_device *dev);
 * a page register that controls bank and packet buffer access. We guard
 * this with ei_local->page_lock. Nobody should assume or set the page other
 * than zero when the lock is not held. Lock holders must restore page 0
 * before unlocking. Even pure readers must take the lock to protect in
 * page 0.
 *
 * To make life difficult the chip can also be very slow. We therefore can't
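A minimal sketch of the access pattern this rule implies, using only register helpers that appear elsewhere in this file (illustrative, not part of the commit): take the page lock, switch pages, do the access, restore page 0, then unlock.

/* Illustrative only: read the current receive page (a page 1 register)
 * while honouring the page_lock discipline described above.
 */
static int example_read_curpag(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	long e8390_base = dev->base_addr;
	unsigned long flags;
	int curpag;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	curpag = inb_p(e8390_base + EN1_CURPAG);
	outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);	/* restore page 0 */
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return curpag;
}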
@@ -141,14 +141,14 @@ static void do_set_multicast_list(struct net_device *dev);
 * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
 * enter lock, take the queued irq. So we waddle instead of flying.
 *
 * Finally by special arrangement for the purpose of being generally
 * annoying the transmit function is called bh atomic. That places
 * restrictions on the user context callers as disable_irq won't save
 * them.
 */



/**
 * ei_open - Open/initialize the board.
 * @dev: network device to initialize
@@ -168,12 +168,12 @@ int ei_open(struct net_device *dev)
	dev->tx_timeout = ei_tx_timeout;
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	NS8390_init(dev, 1);
	/* Set the flag before we drop the lock, That way the IRQ arrives
@@ -198,7 +198,7 @@ int ei_close(struct net_device *dev)
	/*
	 * Hold the page lock during close
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
@@ -241,26 +241,26 @@ void ei_tx_timeout(struct net_device *dev)
		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);

	if (!isr && !ei_local->stat.tx_packets)
	{
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card. Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}

/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
@@ -268,7 +268,7 @@ void ei_tx_timeout(struct net_device *dev)
 *
 * Sends a packet to an 8390 network device.
 */

static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	long e8390_base = dev->base_addr;
@@ -285,24 +285,24 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 * Slow phase with lock held.
	 */

	disable_irq_nosync_lockdep(dev->irq);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
@@ -313,7 +313,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 * card, leaving a substantial gap between each transmitted packet.
	 */

	if (ei_local->tx1 == 0)
	{
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
@@ -321,7 +321,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
			printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
				dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
	}
	else if (ei_local->tx2 == 0)
	{
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
@@ -348,20 +348,20 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (! ei_local->txing)
	{
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		dev->trans_start = jiffies;
		if (output_page == ei_local->tx_start_page)
		{
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		}
		else
		{
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
@@ -377,16 +377,16 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);

	dev_kfree_skb (skb);
	ei_local->stat.tx_bytes += send_length;

	return 0;
}

/**
 * ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
@@ -406,23 +406,23 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
	long e8390_base;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local;

	if (dev == NULL)
	{
		printk ("net_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	e8390_base = dev->base_addr;
	ei_local = (struct ei_device *) netdev_priv(dev);

	/*
	 * Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock)
	{
#if 1 /* This might just be an interrupt for a PCI device sharing this line */
		/* The "irqlock" check is only for testing. */
@@ -435,16 +435,16 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
			inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0. Don't break this. */
	while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
		&& ++nr_serviced < MAX_SERVICE)
	{
		if (!netif_running(dev)) {
			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
@@ -453,9 +453,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
		{
			/* Got a good (?) packet. */
			ei_receive(dev);
@@ -466,27 +466,27 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS)
		{
			ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
			ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
			ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
			outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
		{
			outb_p(ENISR_RDC, e8390_base + EN0_ISR);
		}

		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug)
	{
		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE)
		{
			/* 0xFF is valid for a card removal */
			if(interrupts!=0xFF)
@@ -551,7 +551,7 @@ static void ei_tx_err(struct net_device *dev)

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else
	{
		ei_local->stat.tx_errors++;
		if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
@@ -573,7 +573,7 @@ static void ei_tx_intr(struct net_device *dev)
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	int status = inb(e8390_base + EN0_TSR);

	outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
@@ -582,13 +582,13 @@ static void ei_tx_intr(struct net_device *dev)
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0)
	{
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0)
		{
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
@@ -596,15 +596,15 @@ static void ei_tx_intr(struct net_device *dev)
			ei_local->tx2 = -1,
			ei_local->lasttx = 2;
		}
		else ei_local->lasttx = 20, ei_local->txing = 0;
	}
	else if (ei_local->tx2 < 0)
	{
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0)
		{
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
@@ -623,17 +623,17 @@ static void ei_tx_intr(struct net_device *dev)
		ei_local->stat.collisions++;
	if (status & ENTSR_PTX)
		ei_local->stat.tx_packets++;
	else
	{
		ei_local->stat.tx_errors++;
		if (status & ENTSR_ABT)
		{
			ei_local->stat.tx_aborted_errors++;
			ei_local->stat.collisions += 16;
		}
		if (status & ENTSR_CRS)
			ei_local->stat.tx_carrier_errors++;
		if (status & ENTSR_FU)
			ei_local->stat.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			ei_local->stat.tx_heartbeat_errors++;
@@ -647,7 +647,7 @@ static void ei_tx_intr(struct net_device *dev)
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Called with lock held.
 */

@@ -660,42 +660,42 @@ static void ei_receive(struct net_device *dev)
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10)
	{
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = inb_p(e8390_base + EN1_CURPAG);
		outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring. Boundary is always a page behind. */
		this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
			printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
				dev->name, this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written. This happened a lot during testing! This code should be
		   cleaned up someday. */
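The next_frame computation above lays the 4-byte header plus the data out in 256-byte ring pages, which is why the shift by 8 appears. A small stand-alone illustration of that arithmetic (plain user-space C with assumed ring boundaries, not driver code):

#include <stdio.h>

/* Reproduce the driver's page arithmetic: a frame occupies the 4-byte
 * 8390 header plus pkt_len data bytes, stored in 256-byte pages.
 */
static int next_frame_page(int this_frame, int pkt_len,
			   int rx_start_page, int stop_page)
{
	int next = this_frame + 1 + ((pkt_len + 4) >> 8);	/* same formula as ei_receive() */

	/* Wrap back into the ring for the illustration; the driver itself
	 * takes the real next page from the hardware header (rx_frame.next).
	 */
	if (next >= stop_page)
		next = rx_start_page + (next - stop_page);
	return next;
}

int main(void)
{
	int start = 0x4C, stop = 0x80;	/* example ring layout, values assumed */

	printf("60-byte packet at page 0x4C -> next 0x%02X\n",
	       next_frame_page(0x4C, 60, start, stop));		/* fits in one page */
	printf("1514-byte packet at page 0x7E -> next 0x%02X\n",
	       next_frame_page(0x7E, 1514, start, stop));	/* spans pages and wraps */
	return 0;
}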
@@ -709,7 +709,7 @@ static void ei_receive(struct net_device *dev)
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518)
		{
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
@@ -718,12 +718,12 @@ static void ei_receive(struct net_device *dev)
			ei_local->stat.rx_errors++;
			ei_local->stat.rx_length_errors++;
		}
		else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
		{
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL)
			{
				if (ei_debug > 1)
					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
@@ -745,8 +745,8 @@ static void ei_receive(struct net_device *dev)
				if (pkt_stat & ENRSR_PHY)
					ei_local->stat.multicast++;
			}
		}
		else
		{
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
@@ -758,7 +758,7 @@ static void ei_receive(struct net_device *dev)
			ei_local->stat.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			printk("%s: next frame inconsistency, %#2x\n", dev->name,
@@ -785,7 +785,7 @@ static void ei_receive(struct net_device *dev)
 * This includes causing "the NIC to defer indefinitely when it is stopped
 * on a busy network." Ugh.
 * Called with lock held. Don't call this with the interrupts off or your
 * computer will hate you - it takes 10ms or so.
 */

static void ei_rx_overrun(struct net_device *dev)
@@ -793,19 +793,19 @@ static void ei_rx_overrun(struct net_device *dev)
	long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
	ei_local->stat.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
@@ -826,7 +826,7 @@ static void ei_rx_overrun(struct net_device *dev)
	 */

	if (was_txing)
	{
		unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
@@ -848,7 +848,7 @@ static void ei_rx_overrun(struct net_device *dev)
	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
@@ -856,13 +856,13 @@ static void ei_rx_overrun(struct net_device *dev)
/*
 * Collect the stats. This is called unlocked and from several contexts.
 */

static struct net_device_stats *get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &ei_local->stat;
@@ -873,7 +873,7 @@ static struct net_device_stats *get_stats(struct net_device *dev)
	ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
	ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &ei_local->stat;
}

@@ -881,21 +881,21 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 * Form the 64 bit 8390 multicast table from the linked list of addresses
 * associated with this dev structure.
 */

static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct dev_mc_list *dmi;

	for (dmi=dev->mc_list; dmi; dmi=dmi->next)
	{
		u32 crc;
		if (dmi->dmi_addrlen != ETH_ALEN)
		{
			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
			continue;
		}
		crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
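The hunk ends just before the filter update itself. Going by the comment, the top 6 bits of the 32-bit CRC select one of 64 filter bits spread across the 8-byte mcfilter array. A stand-alone sketch of that indexing (the CRC value is arbitrary, and the exact expression the driver uses is not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

/* Show how a CRC value selects a bit in the 64-bit multicast filter:
 * the 6 most significant bits form an index 0..63, which maps to one
 * bit of the 8-byte mcfilter[] array.
 */
int main(void)
{
	uint8_t mcfilter[8] = { 0 };
	uint32_t crc = 0xDEADBEEF;		/* stand-in for ether_crc(ETH_ALEN, addr) */
	unsigned int index = crc >> 26;		/* top 6 bits: 0..63 */

	mcfilter[index >> 3] |= 1u << (index & 7);	/* byte index, bit within byte */

	printf("crc=0x%08X -> filter bit %u -> mcfilter[%u] |= 0x%02X\n",
	       (unsigned)crc, index, index >> 3, 1u << (index & 7));
	return 0;
}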
@@ -908,16 +908,16 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
 * @dev: net device for which multicast filter is adjusted
 *
 * Set or clear the multicast filter for this adaptor. May be called
 * from a BH in 2.1.x. Must be called with lock held.
 */

static void do_set_multicast_list(struct net_device *dev)
{
	long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
	{
		memset(ei_local->mcfilter, 0, 8);
		if (dev->mc_list)
@@ -926,23 +926,23 @@ static void do_set_multicast_list(struct net_device *dev)
	else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for(i = 0; i < 8; i++)
	{
		outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
@@ -965,16 +965,16 @@ static void do_set_multicast_list(struct net_device *dev)
 * be parallel to just about everything else. Its also fairly quick and
 * not called too often. Must protect against both bh and irq users
 */

static void set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}

/**
 * ethdev_setup - init rest of 8390 device struct
@@ -989,7 +989,7 @@ static void ethdev_setup(struct net_device *dev)
	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
	if (ei_debug > 1)
		printk(version);

	dev->hard_start_xmit = &ei_start_xmit;
	dev->get_stats = get_stats;
	dev->set_multicast_list = &set_multicast_list;
@@ -1011,7 +1011,7 @@ struct net_device *__alloc_ei_netdev(int size)
			    ethdev_setup);
}




/* This page of functions should be 8390 generic */
@@ -1033,9 +1033,9 @@ void NS8390_init(struct net_device *dev, int startp)
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	if(sizeof(struct e8390_pkt_hdr)!=4)
		panic("8390.c: header struct mispacked\n");
	/* Follow National Semi's recommendations for initing the DP83902. */
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
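The panic() above guards the assumption that struct e8390_pkt_hdr maps exactly onto the 4 bytes the chip writes in front of each received frame (the status, next and count fields read by ei_receive() earlier). A stand-alone sketch of a layout that satisfies the check; the field types here are an assumption, only the field names and the 4-byte requirement come from this file:

#include <stdio.h>

/* Assumed layout: two one-byte fields plus a two-byte count pack to
 * exactly 4 bytes, which is what the sizeof check in NS8390_init()
 * insists on before the driver will run.
 */
struct e8390_pkt_hdr {
	unsigned char status;		/* receive status, e.g. ENRSR_RXOK */
	unsigned char next;		/* ring page of the next frame */
	unsigned short count;		/* header + packet length in bytes */
};

int main(void)
{
	printf("sizeof(struct e8390_pkt_hdr) = %zu\n",
	       sizeof(struct e8390_pkt_hdr));	/* expected: 4 on the usual ABIs */
	return 0;
}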
@@ -1055,11 +1055,11 @@ void NS8390_init(struct net_device *dev, int startp)
	/* Clear the pending interrupts and mask. */
	outb_p(0xFF, e8390_base + EN0_ISR);
	outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for(i = 0; i < 6; i++)
	{
		outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if (ei_debug > 1 && inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
@@ -1073,7 +1073,7 @@ void NS8390_init(struct net_device *dev, int startp)
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp)
	{
		outb_p(0xff, e8390_base + EN0_ISR);
		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1085,18 +1085,18 @@ void NS8390_init(struct net_device *dev, int startp)
	}
}

/* Trigger a transmit start, assuming the length is valid.
   Always called with the page lock held */

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);

	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
	{
		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
			dev->name);