Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/cris/eth_v10.c | 428
1 file changed, 276 insertions(+), 152 deletions(-)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index edd6828f0a78..26ffa810e581 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -250,6 +250,7 @@
 #include <asm/system.h>
 #include <asm/ethernet.h>
 #include <asm/cache.h>
+#include <asm/arch/io_interface_mux.h>
 
 //#define ETHDEBUG
 #define D(x)
@@ -279,6 +280,9 @@ struct net_local {
 	 * by this lock as well.
 	 */
 	spinlock_t lock;
+
+	spinlock_t led_lock; /* Protect LED state */
+	spinlock_t transceiver_lock; /* Protect transceiver state. */
 };
 
 typedef struct etrax_eth_descr
@@ -295,8 +299,6 @@ struct transceiver_ops
 	void (*check_duplex)(struct net_device* dev);
 };
 
-struct transceiver_ops* transceiver;
-
 /* Duplex settings */
 enum duplex
 {
@@ -307,7 +309,7 @@ enum duplex
 
 /* Dma descriptors etc. */
 
-#define MAX_MEDIA_DATA_SIZE 1518
+#define MAX_MEDIA_DATA_SIZE 1522
 
 #define MIN_PACKET_LEN 46
 #define ETHER_HEAD_LEN 14
@@ -332,8 +334,8 @@ enum duplex
 
 /*Intel LXT972A specific*/
 #define MDIO_INT_STATUS_REG_2 0x0011
-#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
-#define MDIO_INT_SPEED ( 1 << 14 )
+#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
+#define MDIO_INT_SPEED (1 << 14)
 
 /* Network flash constants */
 #define NET_FLASH_TIME (HZ/50) /* 20 ms */
@@ -344,8 +346,8 @@ enum duplex
 #define NO_NETWORK_ACTIVITY 0
 #define NETWORK_ACTIVITY 1
 
-#define NBR_OF_RX_DESC 64
-#define NBR_OF_TX_DESC 256
+#define NBR_OF_RX_DESC 32
+#define NBR_OF_TX_DESC 16
 
 /* Large packets are sent directly to upper layers while small packets are */
 /* copied (to reduce memory waste). The following constant decides the breakpoint */
@@ -367,7 +369,6 @@ enum duplex
 static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor to
                                           to be processed */
 static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
-static etrax_eth_descr *myPrevRxDesc;  /* The descriptor right before myNextRxDesc */
 
 static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
 
@@ -377,7 +378,6 @@ static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
 static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
 
 static unsigned int network_rec_config_shadow = 0;
-static unsigned int mdio_phy_addr; /* Transciever address */
 
 static unsigned int network_tr_ctrl_shadow = 0;
 
@@ -411,7 +411,7 @@ static int e100_set_config(struct net_device* dev, struct ifmap* map);
 static void e100_tx_timeout(struct net_device *dev);
 static struct net_device_stats *e100_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
-static void e100_hardware_send_packet(char *buf, int length);
+static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
 static void update_rx_stats(struct net_device_stats *);
 static void update_tx_stats(struct net_device_stats *);
 static int e100_probe_transceiver(struct net_device* dev);
@@ -434,7 +434,10 @@ static void e100_clear_network_leds(unsigned long dummy);
 static void e100_set_network_leds(int active);
 
 static const struct ethtool_ops e100_ethtool_ops;
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void dummy_check_speed(struct net_device* dev);
+static void dummy_check_duplex(struct net_device* dev);
+#else
 static void broadcom_check_speed(struct net_device* dev);
 static void broadcom_check_duplex(struct net_device* dev);
 static void tdk_check_speed(struct net_device* dev);
@@ -443,16 +446,28 @@ static void intel_check_speed(struct net_device* dev);
 static void intel_check_duplex(struct net_device* dev);
 static void generic_check_speed(struct net_device* dev);
 static void generic_check_duplex(struct net_device* dev);
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device* dev);
+#endif
+
+static int autoneg_normal = 1;
 
 struct transceiver_ops transceivers[] =
 {
+#if defined(CONFIG_ETRAX_NO_PHY)
+	{0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
+#else
 	{0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
 	{0xC039, tdk_check_speed, tdk_check_duplex},           /* TDK 2120 */
 	{0x039C, tdk_check_speed, tdk_check_duplex},           /* TDK 2120C */
 	{0x04de, intel_check_speed, intel_check_duplex},       /* Intel LXT972A*/
 	{0x0000, generic_check_speed, generic_check_duplex}    /* Generic, must be last */
+#endif
 };
 
+struct transceiver_ops* transceiver = &transceivers[0];
+
 #define tx_done(dev) (*R_DMA_CH0_CMD == 0)
 
 /*
@@ -471,14 +486,22 @@ etrax_ethernet_init(void)
 	int i, err;
 
 	printk(KERN_INFO
-	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
+	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
 
-	dev = alloc_etherdev(sizeof(struct net_local));
-	np = dev->priv;
+	if (cris_request_io_interface(if_eth, cardname)) {
+		printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
+		return -EBUSY;
+	}
 
+	dev = alloc_etherdev(sizeof(struct net_local));
 	if (!dev)
 		return -ENOMEM;
 
+	np = netdev_priv(dev);
+
+	/* we do our own locking */
+	dev->features |= NETIF_F_LLTX;
+
 	dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
 
 	/* now setup our etrax specific stuff */
@@ -498,14 +521,22 @@ etrax_ethernet_init(void)
 	dev->do_ioctl = e100_ioctl;
 	dev->set_config = e100_set_config;
 	dev->tx_timeout = e100_tx_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = e100_netpoll;
+#endif
+
+	spin_lock_init(&np->lock);
+	spin_lock_init(&np->led_lock);
+	spin_lock_init(&np->transceiver_lock);
 
 	/* Initialise the list of Etrax DMA-descriptors */
 
 	/* Initialise receive descriptors */
 
 	for (i = 0; i < NBR_OF_RX_DESC; i++) {
-		/* Allocate two extra cachelines to make sure that buffer used by DMA
-		 * does not share cacheline with any other data (to avoid cache bug)
+		/* Allocate two extra cachelines to make sure that buffer used
+		 * by DMA does not share cacheline with any other data (to
+		 * avoid cache bug)
 		 */
 		RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!RxDescList[i].skb)
@@ -541,7 +572,6 @@ etrax_ethernet_init(void)
 
 	myNextRxDesc = &RxDescList[0];
 	myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
-	myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
 	myFirstTxDesc = &TxDescList[0];
 	myNextTxDesc = &TxDescList[0];
 	myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
@@ -562,10 +592,11 @@ etrax_ethernet_init(void)
 	current_speed = 10;
 	current_speed_selection = 0; /* Auto */
 	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
-	duplex_timer.data = (unsigned long)dev;
+	speed_timer.data = (unsigned long)dev;
 	speed_timer.function = e100_check_speed;
 
 	clear_led_timer.function = e100_clear_network_leds;
+	clear_led_timer.data = (unsigned long)dev;
 
 	full_duplex = 0;
 	current_duplex = autoneg;
@@ -574,7 +605,6 @@ etrax_ethernet_init(void)
 	duplex_timer.function = e100_check_duplex;
 
 	/* Initialize mii interface */
-	np->mii_if.phy_id = mdio_phy_addr;
 	np->mii_if.phy_id_mask = 0x1f;
 	np->mii_if.reg_num_mask = 0x1f;
 	np->mii_if.dev = dev;
@@ -585,6 +615,9 @@ etrax_ethernet_init(void)
 	/* unwanted addresses are matched */
 	*R_NETWORK_GA_0 = 0x00000000;
 	*R_NETWORK_GA_1 = 0x00000000;
+
+	/* Initialize next time the led can flash */
+	led_next_time = jiffies;
 	return 0;
 }
 
@@ -595,7 +628,7 @@ etrax_ethernet_init(void)
 static int
 e100_set_mac_address(struct net_device *dev, void *p)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	struct sockaddr *addr = p;
 	int i;
 
@@ -686,6 +719,25 @@ e100_open(struct net_device *dev)
 		goto grace_exit2;
 	}
 
+	/*
+	 * Always allocate the DMA channels after the IRQ,
+	 * and clean up on failure.
+	 */
+
+	if (cris_request_dma(NETWORK_TX_DMA_NBR,
+	                     cardname,
+	                     DMA_VERBOSE_ON_ERROR,
+	                     dma_eth)) {
+		goto grace_exit3;
+	}
+
+	if (cris_request_dma(NETWORK_RX_DMA_NBR,
+	                     cardname,
+	                     DMA_VERBOSE_ON_ERROR,
+	                     dma_eth)) {
+		goto grace_exit4;
+	}
+
 	/* give the HW an idea of what MAC address we want */
 
 	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
@@ -700,6 +752,7 @@ e100_open(struct net_device *dev)
 
 	*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
 #else
+	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
 	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
 	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
 	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
@@ -719,8 +772,7 @@ e100_open(struct net_device *dev)
 	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
 	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 
-	save_flags(flags);
-	cli();
+	local_irq_save(flags);
 
 	/* enable the irq's for ethernet DMA */
 
@@ -752,12 +804,13 @@ e100_open(struct net_device *dev)
 
 	*R_DMA_CH0_FIRST = 0;
 	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+	netif_start_queue(dev);
 
-	restore_flags(flags);
+	local_irq_restore(flags);
 
 	/* Probe for transceiver */
 	if (e100_probe_transceiver(dev))
-		goto grace_exit3;
+		goto grace_exit5;
 
 	/* Start duplex/speed timers */
 	add_timer(&speed_timer);
@@ -766,10 +819,14 @@ e100_open(struct net_device *dev)
 	/* We are now ready to accept transmit requeusts from
 	 * the queueing layer of the networking.
 	 */
-	netif_start_queue(dev);
+	netif_carrier_on(dev);
 
 	return 0;
 
+grace_exit5:
+	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+grace_exit4:
+	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
 grace_exit3:
 	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 grace_exit2:
@@ -780,12 +837,20 @@ grace_exit0:
 	return -EAGAIN;
 }
 
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_speed(struct net_device* dev)
+{
+	current_speed = 100;
+}
+#else
 static void
 generic_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 	if ((data & ADVERTISE_100FULL) ||
 	    (data & ADVERTISE_100HALF))
 		current_speed = 100;
@@ -797,7 +862,10 @@ static void
 tdk_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_TDK_DIAGNOSTIC_REG);
 	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
 }
 
@@ -805,7 +873,10 @@ static void
 broadcom_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_AUX_CTRL_STATUS_REG);
 	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
 }
 
@@ -813,46 +884,62 @@ static void
 intel_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_INT_STATUS_REG_2);
 	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
 }
-
+#endif
 static void
 e100_check_speed(unsigned long priv)
 {
 	struct net_device* dev = (struct net_device*)priv;
+	struct net_local *np = netdev_priv(dev);
 	static int led_initiated = 0;
 	unsigned long data;
 	int old_speed = current_speed;
 
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
+	spin_lock(&np->transceiver_lock);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
 	if (!(data & BMSR_LSTATUS)) {
 		current_speed = 0;
 	} else {
 		transceiver->check_speed(dev);
 	}
 
+	spin_lock(&np->led_lock);
 	if ((old_speed != current_speed) || !led_initiated) {
 		led_initiated = 1;
 		e100_set_network_leds(NO_NETWORK_ACTIVITY);
+		if (current_speed)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
 	}
+	spin_unlock(&np->led_lock);
 
 	/* Reinitialize the timer. */
 	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 	add_timer(&speed_timer);
+
+	spin_unlock(&np->transceiver_lock);
 }
 
 static void
 e100_negotiate(struct net_device* dev)
 {
-	unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+	unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+						MII_ADVERTISE);
 
 	/* Discard old speed and duplex settings */
 	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
 		  ADVERTISE_10HALF | ADVERTISE_10FULL);
 
 	switch (current_speed_selection) {
-	case 10 :
+	case 10:
 		if (current_duplex == full)
 			data |= ADVERTISE_10FULL;
 		else if (current_duplex == half)
@@ -861,7 +948,7 @@ e100_negotiate(struct net_device* dev)
 			data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
 		break;
 
-	case 100 :
+	case 100:
 		if (current_duplex == full)
 			data |= ADVERTISE_100FULL;
 		else if (current_duplex == half)
@@ -870,7 +957,7 @@ e100_negotiate(struct net_device* dev)
 			data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
 		break;
 
-	case 0 : /* Auto */
+	case 0: /* Auto */
 		if (current_duplex == full)
 			data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
 		else if (current_duplex == half)
@@ -880,35 +967,44 @@ e100_negotiate(struct net_device* dev)
 			  ADVERTISE_100HALF | ADVERTISE_100FULL;
 		break;
 
-	default : /* assume autoneg speed and duplex */
+	default: /* assume autoneg speed and duplex */
 		data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 			  ADVERTISE_100HALF | ADVERTISE_100FULL;
+		break;
 	}
 
-	e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
+	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
 
 	/* Renegotiate with link partner */
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+	if (autoneg_normal) {
+		data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 	data |= BMCR_ANENABLE | BMCR_ANRESTART;
-
-	e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
+	}
+	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
 }
 
 static void
 e100_set_speed(struct net_device* dev, unsigned long speed)
 {
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 	if (speed != current_speed_selection) {
 		current_speed_selection = speed;
 		e100_negotiate(dev);
 	}
+	spin_unlock(&np->transceiver_lock);
 }
 
 static void
 e100_check_duplex(unsigned long priv)
 {
 	struct net_device *dev = (struct net_device *)priv;
-	struct net_local *np = (struct net_local *)dev->priv;
-	int old_duplex = full_duplex;
+	struct net_local *np = netdev_priv(dev);
+	int old_duplex;
+
+	spin_lock(&np->transceiver_lock);
+	old_duplex = full_duplex;
 	transceiver->check_duplex(dev);
 	if (old_duplex != full_duplex) {
 		/* Duplex changed */
@@ -920,13 +1016,22 @@ e100_check_duplex(unsigned long priv)
 	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
 	add_timer(&duplex_timer);
 	np->mii_if.full_duplex = full_duplex;
+	spin_unlock(&np->transceiver_lock);
 }
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_duplex(struct net_device* dev)
+{
+	full_duplex = 1;
+}
+#else
 static void
 generic_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 	if ((data & ADVERTISE_10FULL) ||
 	    (data & ADVERTISE_100FULL))
 		full_duplex = 1;
@@ -938,7 +1043,10 @@ static void
 tdk_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_TDK_DIAGNOSTIC_REG);
 	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
 }
 
@@ -946,7 +1054,10 @@ static void
 broadcom_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_AUX_CTRL_STATUS_REG);
 	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
 }
 
@@ -954,38 +1065,51 @@ static void
 intel_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_INT_STATUS_REG_2);
 	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
 }
-
+#endif
 static void
 e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
 {
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 	if (new_duplex != current_duplex) {
 		current_duplex = new_duplex;
 		e100_negotiate(dev);
 	}
+	spin_unlock(&np->transceiver_lock);
 }
 
 static int
 e100_probe_transceiver(struct net_device* dev)
 {
+#if !defined(CONFIG_ETRAX_NO_PHY)
 	unsigned int phyid_high;
 	unsigned int phyid_low;
 	unsigned int oui;
 	struct transceiver_ops* ops = NULL;
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 
 	/* Probe MDIO physical address */
-	for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
-		if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
+	for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
+	     np->mii_if.phy_id++) {
+		if (e100_get_mdio_reg(dev,
+				      np->mii_if.phy_id, MII_BMSR) != 0xffff)
 			break;
 	}
-	if (mdio_phy_addr == 32)
+	if (np->mii_if.phy_id == 32)
 		return -ENODEV;
 
 	/* Get manufacturer */
-	phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
-	phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
+	phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
+	phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
 	oui = (phyid_high << 6) | (phyid_low >> 10);
 
 	for (ops = &transceivers[0]; ops->oui; ops++) {
@@ -994,6 +1118,8 @@ e100_probe_transceiver(struct net_device* dev)
 	}
 	transceiver = ops;
 
+	spin_unlock(&np->transceiver_lock);
+#endif
 	return 0;
 }
 
@@ -1088,13 +1214,14 @@ e100_receive_mdio_bit()
 static void
 e100_reset_transceiver(struct net_device* dev)
 {
+	struct net_local *np = netdev_priv(dev);
 	unsigned short cmd;
 	unsigned short data;
 	int bitCounter;
 
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 
-	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2);
+	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
 
 	e100_send_mdio_cmd(cmd, 1);
 
@@ -1112,7 +1239,7 @@ e100_reset_transceiver(struct net_device* dev)
 static void
 e100_tx_timeout(struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned long flags;
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -1134,8 +1261,7 @@ e100_tx_timeout(struct net_device *dev)
 	e100_reset_transceiver(dev);
 
 	/* and get rid of the packets that never got an interrupt */
-	while (myFirstTxDesc != myNextTxDesc)
-	{
+	while (myFirstTxDesc != myNextTxDesc) {
 		dev_kfree_skb(myFirstTxDesc->skb);
 		myFirstTxDesc->skb = 0;
 		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
@@ -1161,7 +1287,7 @@ e100_tx_timeout(struct net_device *dev)
 static int
 e100_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned char *buf = skb->data;
 	unsigned long flags;
 
@@ -1174,7 +1300,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	e100_hardware_send_packet(buf, skb->len);
+	e100_hardware_send_packet(np, buf, skb->len);
 
 	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
 
@@ -1197,13 +1323,15 @@ static irqreturn_t
 e100rxtx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = (struct net_local *)dev->priv;
-	unsigned long irqbits = *R_IRQ_MASK2_RD;
+	struct net_local *np = netdev_priv(dev);
+	unsigned long irqbits;
 
-	/* Disable RX/TX IRQs to avoid reentrancy */
-	*R_IRQ_MASK2_CLR =
-	  IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
-	  IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+	/*
+	 * Note that both rx and tx interrupts are blocked at this point,
+	 * regardless of which got us here.
+	 */
+
+	irqbits = *R_IRQ_MASK2_RD;
 
 	/* Handle received packets */
 	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
@@ -1219,7 +1347,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
 		 * allocate a new buffer to put a packet in.
 		 */
 		e100_rx(dev);
-		((struct net_local *)dev->priv)->stats.rx_packets++;
+		np->stats.rx_packets++;
 		/* restart/continue on the channel, for safety */
 		*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
 		/* clear dma channel 1 eop/descr irq bits */
@@ -1233,9 +1361,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
 	}
 
 	/* Report any packets that have been sent */
-	while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
-	       myFirstTxDesc != myNextTxDesc)
-	{
+	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
+	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
 		np->stats.tx_bytes += myFirstTxDesc->skb->len;
 		np->stats.tx_packets++;
 
@@ -1244,19 +1371,15 @@ e100rxtx_interrupt(int irq, void *dev_id)
 		dev_kfree_skb_irq(myFirstTxDesc->skb);
 		myFirstTxDesc->skb = 0;
 		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+		/* Wake up queue. */
+		netif_wake_queue(dev);
 	}
 
 	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
-		/* acknowledge the eop interrupt and wake up queue */
+		/* acknowledge the eop interrupt. */
 		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
-		netif_wake_queue(dev);
 	}
 
-	/* Enable RX/TX IRQs again */
-	*R_IRQ_MASK2_SET =
-	  IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
-	  IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
-
 	return IRQ_HANDLED;
 }
 
@@ -1264,7 +1387,7 @@ static irqreturn_t
 e100nw_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned long irqbits = *R_IRQ_MASK0_RD;
 
 	/* check for underrun irq */
@@ -1286,7 +1409,6 @@ e100nw_interrupt(int irq, void *dev_id)
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		*R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
 		np->stats.tx_errors++;
 		D(printk("ethernet excessive collisions!\n"));
 	}
@@ -1299,12 +1421,13 @@ e100_rx(struct net_device *dev)
 {
 	struct sk_buff *skb;
 	int length = 0;
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned char *skb_data_ptr;
 #ifdef ETHDEBUG
 	int i;
 #endif
-
+	etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
+	spin_lock(&np->led_lock);
 	if (!led_active && time_after(jiffies, led_next_time)) {
 		/* light the network leds depending on the current speed. */
 		e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1314,9 +1437,10 @@ e100_rx(struct net_device *dev)
 		led_active = 1;
 		mod_timer(&clear_led_timer, jiffies + HZ/10);
 	}
+	spin_unlock(&np->led_lock);
 
 	length = myNextRxDesc->descr.hw_len - 4;
-	((struct net_local *)dev->priv)->stats.rx_bytes += length;
+	np->stats.rx_bytes += length;
 
 #ifdef ETHDEBUG
 	printk("Got a packet of length %d:\n", length);
@@ -1336,7 +1460,7 @@ e100_rx(struct net_device *dev)
 		if (!skb) {
 			np->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
-			return;
+			goto update_nextrxdesc;
 		}
 
 		skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
@@ -1354,15 +1478,15 @@ e100_rx(struct net_device *dev)
 	else {
 		/* Large packet, send directly to upper layers and allocate new
 		 * memory (aligned to cache line boundary to avoid bug).
-		 * Before sending the skb to upper layers we must make sure that
-		 * skb->data points to the aligned start of the packet.
+		 * Before sending the skb to upper layers we must make sure
+		 * that skb->data points to the aligned start of the packet.
 		 */
 		int align;
 		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!new_skb) {
 			np->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
-			return;
+			goto update_nextrxdesc;
 		}
 		skb = myNextRxDesc->skb;
 		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
@@ -1377,9 +1501,10 @@ e100_rx(struct net_device *dev)
 	/* Send the packet to the upper layers */
 	netif_rx(skb);
 
+  update_nextrxdesc:
 	/* Prepare for next packet */
 	myNextRxDesc->descr.status = 0;
-	myPrevRxDesc = myNextRxDesc;
+	prevRxDesc = myNextRxDesc;
 	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
 
 	rx_queue_len++;
@@ -1387,9 +1512,9 @@ e100_rx(struct net_device *dev)
 	/* Check if descriptors should be returned */
 	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
 		flush_etrax_cache();
-		myPrevRxDesc->descr.ctrl |= d_eol;
+		prevRxDesc->descr.ctrl |= d_eol;
 		myLastRxDesc->descr.ctrl &= ~d_eol;
-		myLastRxDesc = myPrevRxDesc;
+		myLastRxDesc = prevRxDesc;
 		rx_queue_len = 0;
 	}
 }
@@ -1398,7 +1523,7 @@ e100_rx(struct net_device *dev)
 static int
 e100_close(struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 
 	printk(KERN_INFO "Closing %s.\n", dev->name);
 
@@ -1426,6 +1551,9 @@ e100_close(struct net_device *dev)
 	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
 	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 
+	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
+	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+
 	/* Update the statistics here. */
 
 	update_rx_stats(&np->stats);
@@ -1443,18 +1571,11 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct net_local *np = netdev_priv(dev);
+	int rc = 0;
+	int old_autoneg;
 
 	spin_lock(&np->lock); /* Preempt protection */
 	switch (cmd) {
-		case SIOCGMIIPHY: /* Get PHY address */
-			data->phy_id = mdio_phy_addr;
-			break;
-		case SIOCGMIIREG: /* Read MII register */
-			data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
-			break;
-		case SIOCSMIIREG: /* Write MII register */
-			e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
-			break;
 		/* The ioctls below should be considered obsolete but are */
 		/* still present for compatability with old scripts/apps */
 		case SET_ETH_SPEED_10: /* 10 Mbps */
@@ -1463,60 +1584,47 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		case SET_ETH_SPEED_100: /* 100 Mbps */
 			e100_set_speed(dev, 100);
 			break;
-		case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
+		case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
 			e100_set_speed(dev, 0);
 			break;
-		case SET_ETH_DUPLEX_HALF: /* Half duplex. */
+		case SET_ETH_DUPLEX_HALF: /* Half duplex */
 			e100_set_duplex(dev, half);
 			break;
-		case SET_ETH_DUPLEX_FULL: /* Full duplex. */
+		case SET_ETH_DUPLEX_FULL: /* Full duplex */
 			e100_set_duplex(dev, full);
 			break;
-		case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
+		case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
 			e100_set_duplex(dev, autoneg);
 			break;
+		case SET_ETH_AUTONEG:
+			old_autoneg = autoneg_normal;
+			autoneg_normal = *(int*)data;
+			if (autoneg_normal != old_autoneg)
+				e100_negotiate(dev);
+			break;
 		default:
-			return -EINVAL;
+			rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
+						cmd, NULL);
+			break;
 	}
 	spin_unlock(&np->lock);
-	return 0;
+	return rc;
 }
 
-static int e100_set_settings(struct net_device *dev,
-			     struct ethtool_cmd *ecmd)
+static int e100_get_settings(struct net_device *dev,
+			     struct ethtool_cmd *cmd)
 {
-	ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
-	                  SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
-	                  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
-	ecmd->port = PORT_TP;
-	ecmd->transceiver = XCVR_EXTERNAL;
-	ecmd->phy_address = mdio_phy_addr;
-	ecmd->speed = current_speed;
-	ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-	ecmd->advertising = ADVERTISED_TP;
+	struct net_local *np = netdev_priv(dev);
+	int err;
 
-	if (current_duplex == autoneg && current_speed_selection == 0)
-		ecmd->advertising |= ADVERTISED_Autoneg;
-	else {
-		ecmd->advertising |=
-			ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
-		if (current_speed_selection == 10)
-			ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
-					       ADVERTISED_100baseT_Full);
-		else if (current_speed_selection == 100)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
-					       ADVERTISED_10baseT_Full);
-		if (current_duplex == half)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
-					       ADVERTISED_100baseT_Full);
-		else if (current_duplex == full)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
-					       ADVERTISED_100baseT_Half);
-	}
+	spin_lock_irq(&np->lock);
+	err = mii_ethtool_gset(&np->mii_if, cmd);
+	spin_unlock_irq(&np->lock);
 
-	ecmd->autoneg = AUTONEG_ENABLE;
-	return 0;
+	/* The PHY may support 1000baseT, but the Etrax100 does not. */
+	cmd->supported &= ~(SUPPORTED_1000baseT_Half
+			    | SUPPORTED_1000baseT_Full);
+	return err;
 }
 
 static int e100_set_settings(struct net_device *dev,
@@ -1560,7 +1668,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
 static int
 e100_set_config(struct net_device *dev, struct ifmap *map)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
+
 	spin_lock(&np->lock); /* Preempt protection */
 
 	switch(map->port) {
@@ -1612,7 +1721,6 @@ update_tx_stats(struct net_device_stats *es)
 	es->collisions +=
 		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
 		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
-	es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
 }
 
 /*
@@ -1622,8 +1730,9 @@ update_tx_stats(struct net_device_stats *es)
 static struct net_device_stats *
 e100_get_stats(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *)dev->priv;
+	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
+
 	spin_lock_irqsave(&lp->lock, flags);
 
 	update_rx_stats(&lp->stats);
@@ -1643,13 +1752,13 @@ e100_get_stats(struct net_device *dev)
 static void
 set_multicast_list(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *)dev->priv;
+	struct net_local *lp = netdev_priv(dev);
 	int num_addr = dev->mc_count;
 	unsigned long int lo_bits;
 	unsigned long int hi_bits;
+
 	spin_lock(&lp->lock);
-	if (dev->flags & IFF_PROMISC)
-	{
+	if (dev->flags & IFF_PROMISC) {
 		/* promiscuous mode */
 		lo_bits = 0xfffffffful;
 		hi_bits = 0xfffffffful;
@@ -1679,9 +1788,10 @@ set_multicast_list(struct net_device *dev)
 		struct dev_mc_list *dmi = dev->mc_list;
 		int i;
 		char *baddr;
+
 		lo_bits = 0x00000000ul;
 		hi_bits = 0x00000000ul;
-		for (i=0; i<num_addr; i++) {
+		for (i = 0; i < num_addr; i++) {
 			/* Calculate the hash index for the GA registers */
 
 			hash_ix = 0;
@@ -1708,8 +1818,7 @@ set_multicast_list(struct net_device *dev)
 
 			if (hash_ix >= 32) {
 				hi_bits |= (1 << (hash_ix-32));
-			}
-			else {
+			} else {
 				lo_bits |= (1 << hash_ix);
 			}
 			dmi = dmi->next;
@@ -1724,10 +1833,11 @@ set_multicast_list(struct net_device *dev)
 }
 
 void
-e100_hardware_send_packet(char *buf, int length)
+e100_hardware_send_packet(struct net_local *np, char *buf, int length)
 {
 	D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
 
+	spin_lock(&np->led_lock);
 	if (!led_active && time_after(jiffies, led_next_time)) {
 		/* light the network leds depending on the current speed. */
 		e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1737,6 +1847,7 @@ e100_hardware_send_packet(char *buf, int length)
 		led_active = 1;
 		mod_timer(&clear_led_timer, jiffies + HZ/10);
 	}
+	spin_unlock(&np->led_lock);
 
 	/* configure the tx dma descriptor */
 	myNextTxDesc->descr.sw_len = length;
@@ -1754,6 +1865,11 @@ e100_hardware_send_packet(char *buf, int length)
 static void
 e100_clear_network_leds(unsigned long dummy)
 {
+	struct net_device *dev = (struct net_device *)dummy;
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->led_lock);
+
 	if (led_active && time_after(jiffies, led_next_time)) {
 		e100_set_network_leds(NO_NETWORK_ACTIVITY);
 
@@ -1761,6 +1877,8 @@ e100_clear_network_leds(unsigned long dummy)
 		led_next_time = jiffies + NET_FLASH_PAUSE;
 		led_active = 0;
 	}
+
+	spin_unlock(&np->led_lock);
 }
 
 static void
@@ -1781,19 +1899,25 @@ e100_set_network_leds(int active)
 #else
 		LED_NETWORK_SET(LED_OFF);
 #endif
-	}
-	else if (light_leds) {
+	} else if (light_leds) {
 		if (current_speed == 10) {
 			LED_NETWORK_SET(LED_ORANGE);
 		} else {
 			LED_NETWORK_SET(LED_GREEN);
 		}
-	}
-	else {
+	} else {
 		LED_NETWORK_SET(LED_OFF);
 	}
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+e100_netpoll(struct net_device* netdev)
+{
+	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+}
+#endif
+
 static int
 etrax_init_module(void)
 {