Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/cris/eth_v10.c | 440
 1 file changed, 284 insertions(+), 156 deletions(-)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index edd6828f0a78..917b7b46f1a7 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -250,6 +250,7 @@
 #include <asm/system.h>
 #include <asm/ethernet.h>
 #include <asm/cache.h>
+#include <asm/arch/io_interface_mux.h>
 
 //#define ETHDEBUG
 #define D(x)
@@ -279,6 +280,9 @@ struct net_local {
 	 * by this lock as well.
 	 */
 	spinlock_t lock;
+
+	spinlock_t led_lock; /* Protect LED state */
+	spinlock_t transceiver_lock; /* Protect transceiver state. */
 };
 
 typedef struct etrax_eth_descr
@@ -295,8 +299,6 @@ struct transceiver_ops
 	void (*check_duplex)(struct net_device* dev);
 };
 
-struct transceiver_ops* transceiver;
-
 /* Duplex settings */
 enum duplex
 {
@@ -307,7 +309,7 @@ enum duplex
 
 /* Dma descriptors etc. */
 
-#define MAX_MEDIA_DATA_SIZE 1518
+#define MAX_MEDIA_DATA_SIZE 1522
 
 #define MIN_PACKET_LEN 46
 #define ETHER_HEAD_LEN 14
@@ -332,8 +334,8 @@ enum duplex
 
 /*Intel LXT972A specific*/
 #define MDIO_INT_STATUS_REG_2 0x0011
-#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
-#define MDIO_INT_SPEED ( 1 << 14 )
+#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
+#define MDIO_INT_SPEED (1 << 14)
 
 /* Network flash constants */
 #define NET_FLASH_TIME (HZ/50) /* 20 ms */
@@ -344,8 +346,8 @@ enum duplex
 #define NO_NETWORK_ACTIVITY 0
 #define NETWORK_ACTIVITY 1
 
-#define NBR_OF_RX_DESC 64
-#define NBR_OF_TX_DESC 256
+#define NBR_OF_RX_DESC 32
+#define NBR_OF_TX_DESC 16
 
 /* Large packets are sent directly to upper layers while small packets are */
 /* copied (to reduce memory waste). The following constant decides the breakpoint */
@@ -367,7 +369,6 @@ enum duplex
 static etrax_eth_descr *myNextRxDesc;  /* Points to the next descriptor to
                                           to be processed */
 static etrax_eth_descr *myLastRxDesc;  /* The last processed descriptor */
-static etrax_eth_descr *myPrevRxDesc;  /* The descriptor right before myNextRxDesc */
 
 static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
 
@@ -377,7 +378,6 @@ static etrax_eth_descr* myNextTxDesc;  /* Next descriptor to use */
 static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
 
 static unsigned int network_rec_config_shadow = 0;
-static unsigned int mdio_phy_addr; /* Transciever address */
 
 static unsigned int network_tr_ctrl_shadow = 0;
 
@@ -411,7 +411,7 @@ static int e100_set_config(struct net_device* dev, struct ifmap* map);
 static void e100_tx_timeout(struct net_device *dev);
 static struct net_device_stats *e100_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
-static void e100_hardware_send_packet(char *buf, int length);
+static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
 static void update_rx_stats(struct net_device_stats *);
 static void update_tx_stats(struct net_device_stats *);
 static int e100_probe_transceiver(struct net_device* dev);
@@ -434,7 +434,10 @@ static void e100_clear_network_leds(unsigned long dummy);
 static void e100_set_network_leds(int active);
 
 static const struct ethtool_ops e100_ethtool_ops;
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void dummy_check_speed(struct net_device* dev);
+static void dummy_check_duplex(struct net_device* dev);
+#else
 static void broadcom_check_speed(struct net_device* dev);
 static void broadcom_check_duplex(struct net_device* dev);
 static void tdk_check_speed(struct net_device* dev);
@@ -443,16 +446,28 @@ static void intel_check_speed(struct net_device* dev);
 static void intel_check_duplex(struct net_device* dev);
 static void generic_check_speed(struct net_device* dev);
 static void generic_check_duplex(struct net_device* dev);
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device* dev);
+#endif
+
+static int autoneg_normal = 1;
 
 struct transceiver_ops transceivers[] =
 {
+#if defined(CONFIG_ETRAX_NO_PHY)
+	{0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
+#else
 	{0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
 	{0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
 	{0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
 	{0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
 	{0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
+#endif
 };
 
+struct transceiver_ops* transceiver = &transceivers[0];
+
 #define tx_done(dev) (*R_DMA_CH0_CMD == 0)
 
 /*
@@ -471,14 +486,22 @@ etrax_ethernet_init(void)
 	int i, err;
 
 	printk(KERN_INFO
-	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
+	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
 
-	dev = alloc_etherdev(sizeof(struct net_local));
-	np = dev->priv;
+	if (cris_request_io_interface(if_eth, cardname)) {
+		printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
+		return -EBUSY;
+	}
 
+	dev = alloc_etherdev(sizeof(struct net_local));
 	if (!dev)
 		return -ENOMEM;
 
+	np = netdev_priv(dev);
+
+	/* we do our own locking */
+	dev->features |= NETIF_F_LLTX;
+
 	dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
 
 	/* now setup our etrax specific stuff */
@@ -498,14 +521,22 @@ etrax_ethernet_init(void)
 	dev->do_ioctl = e100_ioctl;
 	dev->set_config = e100_set_config;
 	dev->tx_timeout = e100_tx_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = e100_netpoll;
+#endif
+
+	spin_lock_init(&np->lock);
+	spin_lock_init(&np->led_lock);
+	spin_lock_init(&np->transceiver_lock);
 
 	/* Initialise the list of Etrax DMA-descriptors */
 
 	/* Initialise receive descriptors */
 
 	for (i = 0; i < NBR_OF_RX_DESC; i++) {
-		/* Allocate two extra cachelines to make sure that buffer used by DMA
-		 * does not share cacheline with any other data (to avoid cache bug)
+		/* Allocate two extra cachelines to make sure that buffer used
+		 * by DMA does not share cacheline with any other data (to
+		 * avoid cache bug)
 		 */
 		RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!RxDescList[i].skb)
@@ -541,7 +572,6 @@ etrax_ethernet_init(void)
 
 	myNextRxDesc = &RxDescList[0];
 	myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
-	myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
 	myFirstTxDesc = &TxDescList[0];
 	myNextTxDesc = &TxDescList[0];
 	myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
@@ -562,10 +592,11 @@ etrax_ethernet_init(void)
 	current_speed = 10;
 	current_speed_selection = 0; /* Auto */
 	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
-	duplex_timer.data = (unsigned long)dev;
+	speed_timer.data = (unsigned long)dev;
 	speed_timer.function = e100_check_speed;
 
 	clear_led_timer.function = e100_clear_network_leds;
+	clear_led_timer.data = (unsigned long)dev;
 
 	full_duplex = 0;
 	current_duplex = autoneg;
@@ -574,7 +605,6 @@ etrax_ethernet_init(void)
 	duplex_timer.function = e100_check_duplex;
 
 	/* Initialize mii interface */
-	np->mii_if.phy_id = mdio_phy_addr;
 	np->mii_if.phy_id_mask = 0x1f;
 	np->mii_if.reg_num_mask = 0x1f;
 	np->mii_if.dev = dev;
@@ -585,6 +615,9 @@ etrax_ethernet_init(void)
 	/* unwanted addresses are matched */
 	*R_NETWORK_GA_0 = 0x00000000;
 	*R_NETWORK_GA_1 = 0x00000000;
+
+	/* Initialize next time the led can flash */
+	led_next_time = jiffies;
 	return 0;
 }
 
@@ -595,9 +628,9 @@ etrax_ethernet_init(void)
 static int
 e100_set_mac_address(struct net_device *dev, void *p)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	struct sockaddr *addr = p;
-	int i;
+	DECLARE_MAC_BUF(mac);
 
 	spin_lock(&np->lock); /* preemption protection */
 
@@ -686,6 +719,25 @@ e100_open(struct net_device *dev)
 		goto grace_exit2;
 	}
 
+	/*
+	 * Always allocate the DMA channels after the IRQ,
+	 * and clean up on failure.
+	 */
+
+	if (cris_request_dma(NETWORK_TX_DMA_NBR,
+	                     cardname,
+	                     DMA_VERBOSE_ON_ERROR,
+	                     dma_eth)) {
+		goto grace_exit3;
+	}
+
+	if (cris_request_dma(NETWORK_RX_DMA_NBR,
+	                     cardname,
+	                     DMA_VERBOSE_ON_ERROR,
+	                     dma_eth)) {
+		goto grace_exit4;
+	}
+
 	/* give the HW an idea of what MAC address we want */
 
 	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
@@ -700,6 +752,7 @@ e100_open(struct net_device *dev)
 
 	*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
 #else
+	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
 	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
 	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
 	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
@@ -719,8 +772,7 @@ e100_open(struct net_device *dev)
 	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
 	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 
-	save_flags(flags);
-	cli();
+	local_irq_save(flags);
 
 	/* enable the irq's for ethernet DMA */
 
@@ -752,12 +804,13 @@ e100_open(struct net_device *dev)
 
 	*R_DMA_CH0_FIRST = 0;
 	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+	netif_start_queue(dev);
 
-	restore_flags(flags);
+	local_irq_restore(flags);
 
 	/* Probe for transceiver */
 	if (e100_probe_transceiver(dev))
-		goto grace_exit3;
+		goto grace_exit5;
 
 	/* Start duplex/speed timers */
 	add_timer(&speed_timer);
@@ -766,10 +819,14 @@ e100_open(struct net_device *dev)
 	/* We are now ready to accept transmit requeusts from
 	 * the queueing layer of the networking.
 	 */
-	netif_start_queue(dev);
+	netif_carrier_on(dev);
 
 	return 0;
 
+grace_exit5:
+	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+grace_exit4:
+	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
 grace_exit3:
 	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 grace_exit2:
@@ -780,12 +837,20 @@ grace_exit0:
 	return -EAGAIN;
 }
 
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_speed(struct net_device* dev)
+{
+	current_speed = 100;
+}
+#else
 static void
 generic_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 	if ((data & ADVERTISE_100FULL) ||
 	    (data & ADVERTISE_100HALF))
 		current_speed = 100;
@@ -797,7 +862,10 @@ static void
 tdk_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_TDK_DIAGNOSTIC_REG);
 	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
 }
 
@@ -805,7 +873,10 @@ static void
 broadcom_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_AUX_CTRL_STATUS_REG);
 	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
 }
 
@@ -813,46 +884,62 @@ static void
 intel_check_speed(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_INT_STATUS_REG_2);
 	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
 }
-
+#endif
 static void
 e100_check_speed(unsigned long priv)
 {
 	struct net_device* dev = (struct net_device*)priv;
+	struct net_local *np = netdev_priv(dev);
 	static int led_initiated = 0;
 	unsigned long data;
 	int old_speed = current_speed;
 
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
+	spin_lock(&np->transceiver_lock);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
 	if (!(data & BMSR_LSTATUS)) {
 		current_speed = 0;
 	} else {
 		transceiver->check_speed(dev);
 	}
 
+	spin_lock(&np->led_lock);
 	if ((old_speed != current_speed) || !led_initiated) {
 		led_initiated = 1;
 		e100_set_network_leds(NO_NETWORK_ACTIVITY);
+		if (current_speed)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
 	}
+	spin_unlock(&np->led_lock);
 
 	/* Reinitialize the timer. */
 	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
 	add_timer(&speed_timer);
+
+	spin_unlock(&np->transceiver_lock);
 }
 
 static void
 e100_negotiate(struct net_device* dev)
 {
-	unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+	unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+						MII_ADVERTISE);
 
 	/* Discard old speed and duplex settings */
 	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
 	          ADVERTISE_10HALF | ADVERTISE_10FULL);
 
 	switch (current_speed_selection) {
-	case 10 :
+	case 10:
 		if (current_duplex == full)
 			data |= ADVERTISE_10FULL;
 		else if (current_duplex == half)
@@ -861,7 +948,7 @@ e100_negotiate(struct net_device* dev)
 			data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
 		break;
 
-	case 100 :
+	case 100:
 		if (current_duplex == full)
 			data |= ADVERTISE_100FULL;
 		else if (current_duplex == half)
@@ -870,7 +957,7 @@ e100_negotiate(struct net_device* dev)
 			data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
 		break;
 
-	case 0 : /* Auto */
+	case 0: /* Auto */
 		if (current_duplex == full)
 			data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
 		else if (current_duplex == half)
@@ -880,35 +967,44 @@ e100_negotiate(struct net_device* dev)
 			ADVERTISE_100HALF | ADVERTISE_100FULL;
 		break;
 
-	default : /* assume autoneg speed and duplex */
+	default: /* assume autoneg speed and duplex */
 		data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
 			ADVERTISE_100HALF | ADVERTISE_100FULL;
+		break;
 	}
 
-	e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
+	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
 
 	/* Renegotiate with link partner */
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+	if (autoneg_normal) {
+		data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 	data |= BMCR_ANENABLE | BMCR_ANRESTART;
-
-	e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
+	}
+	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
 }
 
 static void
 e100_set_speed(struct net_device* dev, unsigned long speed)
 {
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 	if (speed != current_speed_selection) {
 		current_speed_selection = speed;
 		e100_negotiate(dev);
 	}
+	spin_unlock(&np->transceiver_lock);
 }
 
 static void
 e100_check_duplex(unsigned long priv)
 {
 	struct net_device *dev = (struct net_device *)priv;
-	struct net_local *np = (struct net_local *)dev->priv;
-	int old_duplex = full_duplex;
+	struct net_local *np = netdev_priv(dev);
+	int old_duplex;
+
+	spin_lock(&np->transceiver_lock);
+	old_duplex = full_duplex;
 	transceiver->check_duplex(dev);
 	if (old_duplex != full_duplex) {
 		/* Duplex changed */
@@ -920,13 +1016,22 @@ e100_check_duplex(unsigned long priv)
 	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
 	add_timer(&duplex_timer);
 	np->mii_if.full_duplex = full_duplex;
+	spin_unlock(&np->transceiver_lock);
 }
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_duplex(struct net_device* dev)
+{
+	full_duplex = 1;
+}
+#else
 static void
 generic_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
 	if ((data & ADVERTISE_10FULL) ||
 	    (data & ADVERTISE_100FULL))
 		full_duplex = 1;
@@ -938,7 +1043,10 @@ static void
 tdk_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_TDK_DIAGNOSTIC_REG);
 	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
 }
 
@@ -946,7 +1054,10 @@ static void
 broadcom_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_AUX_CTRL_STATUS_REG);
 	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
 }
 
@@ -954,38 +1065,55 @@ static void
 intel_check_duplex(struct net_device* dev)
 {
 	unsigned long data;
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+	struct net_local *np = netdev_priv(dev);
+
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+				 MDIO_INT_STATUS_REG_2);
 	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
 }
-
+#endif
 static void
 e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
 {
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 	if (new_duplex != current_duplex) {
 		current_duplex = new_duplex;
 		e100_negotiate(dev);
 	}
+	spin_unlock(&np->transceiver_lock);
 }
 
 static int
 e100_probe_transceiver(struct net_device* dev)
 {
+	int ret = 0;
+
+#if !defined(CONFIG_ETRAX_NO_PHY)
 	unsigned int phyid_high;
 	unsigned int phyid_low;
 	unsigned int oui;
 	struct transceiver_ops* ops = NULL;
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->transceiver_lock);
 
 	/* Probe MDIO physical address */
-	for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
-		if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
+	for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
+	     np->mii_if.phy_id++) {
+		if (e100_get_mdio_reg(dev,
+				      np->mii_if.phy_id, MII_BMSR) != 0xffff)
 			break;
 	}
-	if (mdio_phy_addr == 32)
-		return -ENODEV;
+	if (np->mii_if.phy_id == 32) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	/* Get manufacturer */
-	phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
-	phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
+	phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
+	phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
 	oui = (phyid_high << 6) | (phyid_low >> 10);
 
 	for (ops = &transceivers[0]; ops->oui; ops++) {
@@ -993,8 +1121,10 @@ e100_probe_transceiver(struct net_device* dev)
 			break;
 	}
 	transceiver = ops;
-
-	return 0;
+out:
+	spin_unlock(&np->transceiver_lock);
+#endif
+	return ret;
 }
 
 static int
@@ -1088,13 +1218,14 @@ e100_receive_mdio_bit()
 static void
 e100_reset_transceiver(struct net_device* dev)
 {
+	struct net_local *np = netdev_priv(dev);
 	unsigned short cmd;
 	unsigned short data;
 	int bitCounter;
 
-	data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
 
-	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2);
+	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
 
 	e100_send_mdio_cmd(cmd, 1);
 
@@ -1112,7 +1243,7 @@ e100_reset_transceiver(struct net_device* dev)
 static void
 e100_tx_timeout(struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned long flags;
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -1134,8 +1265,7 @@ e100_tx_timeout(struct net_device *dev)
 	e100_reset_transceiver(dev);
 
 	/* and get rid of the packets that never got an interrupt */
-	while (myFirstTxDesc != myNextTxDesc)
-	{
+	while (myFirstTxDesc != myNextTxDesc) {
 		dev_kfree_skb(myFirstTxDesc->skb);
 		myFirstTxDesc->skb = 0;
 		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
@@ -1161,7 +1291,7 @@ e100_tx_timeout(struct net_device *dev)
 static int
 e100_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned char *buf = skb->data;
 	unsigned long flags;
 
@@ -1174,7 +1304,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	e100_hardware_send_packet(buf, skb->len);
+	e100_hardware_send_packet(np, buf, skb->len);
 
 	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
 
@@ -1197,13 +1327,15 @@ static irqreturn_t
 e100rxtx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = (struct net_local *)dev->priv;
-	unsigned long irqbits = *R_IRQ_MASK2_RD;
+	struct net_local *np = netdev_priv(dev);
+	unsigned long irqbits;
 
-	/* Disable RX/TX IRQs to avoid reentrancy */
-	*R_IRQ_MASK2_CLR =
-		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
-		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+	/*
+	 * Note that both rx and tx interrupts are blocked at this point,
+	 * regardless of which got us here.
+	 */
+
+	irqbits = *R_IRQ_MASK2_RD;
 
 	/* Handle received packets */
 	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
@@ -1219,7 +1351,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
 		 * allocate a new buffer to put a packet in.
 		 */
 		e100_rx(dev);
-		((struct net_local *)dev->priv)->stats.rx_packets++;
+		np->stats.rx_packets++;
 		/* restart/continue on the channel, for safety */
 		*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
 		/* clear dma channel 1 eop/descr irq bits */
@@ -1233,9 +1365,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
 	}
 
 	/* Report any packets that have been sent */
-	while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
-	       myFirstTxDesc != myNextTxDesc)
-	{
+	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
+	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
 		np->stats.tx_bytes += myFirstTxDesc->skb->len;
 		np->stats.tx_packets++;
 
@@ -1244,19 +1375,15 @@ e100rxtx_interrupt(int irq, void *dev_id)
 		dev_kfree_skb_irq(myFirstTxDesc->skb);
 		myFirstTxDesc->skb = 0;
 		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+		/* Wake up queue. */
+		netif_wake_queue(dev);
 	}
 
 	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
-		/* acknowledge the eop interrupt and wake up queue */
+		/* acknowledge the eop interrupt. */
 		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
-		netif_wake_queue(dev);
 	}
 
-	/* Enable RX/TX IRQs again */
-	*R_IRQ_MASK2_SET =
-		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
-		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
-
 	return IRQ_HANDLED;
 }
 
@@ -1264,7 +1391,7 @@ static irqreturn_t
 e100nw_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned long irqbits = *R_IRQ_MASK0_RD;
 
 	/* check for underrun irq */
@@ -1286,7 +1413,6 @@ e100nw_interrupt(int irq, void *dev_id)
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		*R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
 		np->stats.tx_errors++;
 		D(printk("ethernet excessive collisions!\n"));
 	}
@@ -1299,12 +1425,13 @@ e100_rx(struct net_device *dev)
 {
 	struct sk_buff *skb;
 	int length = 0;
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 	unsigned char *skb_data_ptr;
 #ifdef ETHDEBUG
 	int i;
 #endif
-
+	etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */
+	spin_lock(&np->led_lock);
 	if (!led_active && time_after(jiffies, led_next_time)) {
 		/* light the network leds depending on the current speed. */
 		e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1314,9 +1441,10 @@ e100_rx(struct net_device *dev)
 		led_active = 1;
 		mod_timer(&clear_led_timer, jiffies + HZ/10);
 	}
+	spin_unlock(&np->led_lock);
 
 	length = myNextRxDesc->descr.hw_len - 4;
-	((struct net_local *)dev->priv)->stats.rx_bytes += length;
+	np->stats.rx_bytes += length;
 
 #ifdef ETHDEBUG
 	printk("Got a packet of length %d:\n", length);
@@ -1336,7 +1464,7 @@ e100_rx(struct net_device *dev)
 		if (!skb) {
 			np->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
-			return;
+			goto update_nextrxdesc;
 		}
 
 		skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
@@ -1354,15 +1482,15 @@ e100_rx(struct net_device *dev)
 	else {
 		/* Large packet, send directly to upper layers and allocate new
 		 * memory (aligned to cache line boundary to avoid bug).
-		 * Before sending the skb to upper layers we must make sure that
-		 * skb->data points to the aligned start of the packet.
+		 * Before sending the skb to upper layers we must make sure
+		 * that skb->data points to the aligned start of the packet.
 		 */
 		int align;
 		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!new_skb) {
 			np->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
-			return;
+			goto update_nextrxdesc;
 		}
 		skb = myNextRxDesc->skb;
 		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
@@ -1377,9 +1505,10 @@ e100_rx(struct net_device *dev)
 	/* Send the packet to the upper layers */
 	netif_rx(skb);
 
+  update_nextrxdesc:
 	/* Prepare for next packet */
 	myNextRxDesc->descr.status = 0;
-	myPrevRxDesc = myNextRxDesc;
+	prevRxDesc = myNextRxDesc;
 	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
 
 	rx_queue_len++;
@@ -1387,9 +1516,9 @@ e100_rx(struct net_device *dev)
 	/* Check if descriptors should be returned */
 	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
 		flush_etrax_cache();
-		myPrevRxDesc->descr.ctrl |= d_eol;
+		prevRxDesc->descr.ctrl |= d_eol;
 		myLastRxDesc->descr.ctrl &= ~d_eol;
-		myLastRxDesc = myPrevRxDesc;
+		myLastRxDesc = prevRxDesc;
 		rx_queue_len = 0;
 	}
 }
@@ -1398,7 +1527,7 @@ e100_rx(struct net_device *dev)
 static int
 e100_close(struct net_device *dev)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
 
 	printk(KERN_INFO "Closing %s.\n", dev->name);
 
@@ -1426,6 +1555,9 @@ e100_close(struct net_device *dev)
 	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
 	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
 
+	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
+	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+
 	/* Update the statistics here. */
 
 	update_rx_stats(&np->stats);
@@ -1443,18 +1575,11 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
 	struct net_local *np = netdev_priv(dev);
+	int rc = 0;
+	int old_autoneg;
 
 	spin_lock(&np->lock); /* Preempt protection */
 	switch (cmd) {
-	case SIOCGMIIPHY: /* Get PHY address */
-		data->phy_id = mdio_phy_addr;
-		break;
-	case SIOCGMIIREG: /* Read MII register */
-		data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
-		break;
-	case SIOCSMIIREG: /* Write MII register */
-		e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
-		break;
 	/* The ioctls below should be considered obsolete but are */
 	/* still present for compatability with old scripts/apps */
 	case SET_ETH_SPEED_10: /* 10 Mbps */
@@ -1463,60 +1588,47 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SET_ETH_SPEED_100: /* 100 Mbps */
 		e100_set_speed(dev, 100);
 		break;
-	case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
+	case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
 		e100_set_speed(dev, 0);
 		break;
-	case SET_ETH_DUPLEX_HALF: /* Half duplex. */
+	case SET_ETH_DUPLEX_HALF: /* Half duplex */
 		e100_set_duplex(dev, half);
 		break;
-	case SET_ETH_DUPLEX_FULL: /* Full duplex. */
+	case SET_ETH_DUPLEX_FULL: /* Full duplex */
 		e100_set_duplex(dev, full);
 		break;
-	case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
+	case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
 		e100_set_duplex(dev, autoneg);
 		break;
+	case SET_ETH_AUTONEG:
+		old_autoneg = autoneg_normal;
+		autoneg_normal = *(int*)data;
+		if (autoneg_normal != old_autoneg)
+			e100_negotiate(dev);
+		break;
 	default:
-		return -EINVAL;
+		rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
+				       cmd, NULL);
+		break;
 	}
 	spin_unlock(&np->lock);
-	return 0;
+	return rc;
 }
 
-static int e100_set_settings(struct net_device *dev,
-			     struct ethtool_cmd *ecmd)
+static int e100_get_settings(struct net_device *dev,
+			     struct ethtool_cmd *cmd)
 {
-	ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
-			  SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
-			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
-	ecmd->port = PORT_TP;
-	ecmd->transceiver = XCVR_EXTERNAL;
-	ecmd->phy_address = mdio_phy_addr;
-	ecmd->speed = current_speed;
-	ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-	ecmd->advertising = ADVERTISED_TP;
+	struct net_local *np = netdev_priv(dev);
+	int err;
 
-	if (current_duplex == autoneg && current_speed_selection == 0)
-		ecmd->advertising |= ADVERTISED_Autoneg;
-	else {
-		ecmd->advertising |=
-			ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
-		if (current_speed_selection == 10)
-			ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
-					       ADVERTISED_100baseT_Full);
-		else if (current_speed_selection == 100)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
-					       ADVERTISED_10baseT_Full);
-		if (current_duplex == half)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
-					       ADVERTISED_100baseT_Full);
-		else if (current_duplex == full)
-			ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
-					       ADVERTISED_100baseT_Half);
-	}
+	spin_lock_irq(&np->lock);
+	err = mii_ethtool_gset(&np->mii_if, cmd);
+	spin_unlock_irq(&np->lock);
 
-	ecmd->autoneg = AUTONEG_ENABLE;
-	return 0;
+	/* The PHY may support 1000baseT, but the Etrax100 does not. */
+	cmd->supported &= ~(SUPPORTED_1000baseT_Half
+			    | SUPPORTED_1000baseT_Full);
+	return err;
 }
 
 static int e100_set_settings(struct net_device *dev,
@@ -1560,7 +1672,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
 static int
 e100_set_config(struct net_device *dev, struct ifmap *map)
 {
-	struct net_local *np = (struct net_local *)dev->priv;
+	struct net_local *np = netdev_priv(dev);
+
 	spin_lock(&np->lock); /* Preempt protection */
 
 	switch(map->port) {
@@ -1612,7 +1725,6 @@ update_tx_stats(struct net_device_stats *es)
 	es->collisions +=
 		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
 		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
-	es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
 }
 
 /*
@@ -1622,8 +1734,9 @@ update_tx_stats(struct net_device_stats *es)
 static struct net_device_stats *
 e100_get_stats(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *)dev->priv;
+	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
+
 	spin_lock_irqsave(&lp->lock, flags);
 
 	update_rx_stats(&lp->stats);
@@ -1643,13 +1756,13 @@ e100_get_stats(struct net_device *dev)
 static void
 set_multicast_list(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *)dev->priv;
+	struct net_local *lp = netdev_priv(dev);
 	int num_addr = dev->mc_count;
 	unsigned long int lo_bits;
 	unsigned long int hi_bits;
+
 	spin_lock(&lp->lock);
-	if (dev->flags & IFF_PROMISC)
-	{
+	if (dev->flags & IFF_PROMISC) {
 		/* promiscuous mode */
 		lo_bits = 0xfffffffful;
 		hi_bits = 0xfffffffful;
@@ -1679,9 +1792,10 @@ set_multicast_list(struct net_device *dev)
 		struct dev_mc_list *dmi = dev->mc_list;
 		int i;
 		char *baddr;
+
 		lo_bits = 0x00000000ul;
 		hi_bits = 0x00000000ul;
-		for (i=0; i<num_addr; i++) {
+		for (i = 0; i < num_addr; i++) {
 			/* Calculate the hash index for the GA registers */
 
 			hash_ix = 0;
@@ -1708,8 +1822,7 @@ set_multicast_list(struct net_device *dev)
 
 			if (hash_ix >= 32) {
 				hi_bits |= (1 << (hash_ix-32));
-			}
-			else {
+			} else {
 				lo_bits |= (1 << hash_ix);
 			}
 			dmi = dmi->next;
@@ -1724,10 +1837,11 @@ set_multicast_list(struct net_device *dev)
 }
 
 void
-e100_hardware_send_packet(char *buf, int length)
+e100_hardware_send_packet(struct net_local *np, char *buf, int length)
 {
 	D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
 
+	spin_lock(&np->led_lock);
 	if (!led_active && time_after(jiffies, led_next_time)) {
 		/* light the network leds depending on the current speed. */
 		e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1737,6 +1851,7 @@ e100_hardware_send_packet(char *buf, int length)
 		led_active = 1;
 		mod_timer(&clear_led_timer, jiffies + HZ/10);
 	}
+	spin_unlock(&np->led_lock);
 
 	/* configure the tx dma descriptor */
 	myNextTxDesc->descr.sw_len = length;
@@ -1754,6 +1869,11 @@ e100_hardware_send_packet(char *buf, int length)
 static void
 e100_clear_network_leds(unsigned long dummy)
 {
+	struct net_device *dev = (struct net_device *)dummy;
+	struct net_local *np = netdev_priv(dev);
+
+	spin_lock(&np->led_lock);
+
 	if (led_active && time_after(jiffies, led_next_time)) {
 		e100_set_network_leds(NO_NETWORK_ACTIVITY);
 
@@ -1761,6 +1881,8 @@ e100_clear_network_leds(unsigned long dummy)
 		led_next_time = jiffies + NET_FLASH_PAUSE;
 		led_active = 0;
 	}
+
+	spin_unlock(&np->led_lock);
 }
 
 static void
@@ -1781,19 +1903,25 @@ e100_set_network_leds(int active)
 #else
 		LED_NETWORK_SET(LED_OFF);
 #endif
-	}
-	else if (light_leds) {
+	} else if (light_leds) {
 		if (current_speed == 10) {
 			LED_NETWORK_SET(LED_ORANGE);
 		} else {
 			LED_NETWORK_SET(LED_GREEN);
 		}
-	}
-	else {
+	} else {
 		LED_NETWORK_SET(LED_OFF);
 	}
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+e100_netpoll(struct net_device* netdev)
+{
+	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+}
+#endif
+
 static int
 etrax_init_module(void)
 {