Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r-- | drivers/net/forcedeth.c | 1761 |
1 files changed, 1441 insertions, 320 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5a8651b4b01d..04a53f1dfdbd 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -107,6 +107,8 @@ | |||
107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | 107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. |
108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. | 108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. |
109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. | 109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. |
110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). | ||
111 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. | ||
110 | * | 112 | * |
111 | * Known bugs: | 113 | * Known bugs: |
112 | * We suspect that on some hardware no TX done interrupts are generated. | 114 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -118,7 +120,7 @@ | |||
118 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 120 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
119 | * superfluous timer interrupts from the nic. | 121 | * superfluous timer interrupts from the nic. |
120 | */ | 122 | */ |
121 | #define FORCEDETH_VERSION "0.54" | 123 | #define FORCEDETH_VERSION "0.56" |
122 | #define DRV_NAME "forcedeth" | 124 | #define DRV_NAME "forcedeth" |
123 | 125 | ||
124 | #include <linux/module.h> | 126 | #include <linux/module.h> |
@@ -163,6 +165,9 @@ | |||
163 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ | 165 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ |
164 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | 166 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ |
165 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ | 167 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ |
168 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ | ||
169 | #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ | ||
170 | #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ | ||
166 | 171 | ||
167 | enum { | 172 | enum { |
168 | NvRegIrqStatus = 0x000, | 173 | NvRegIrqStatus = 0x000, |
@@ -203,6 +208,7 @@ enum { | |||
203 | NvRegMSIIrqMask = 0x030, | 208 | NvRegMSIIrqMask = 0x030, |
204 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | 209 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 |
205 | NvRegMisc1 = 0x080, | 210 | NvRegMisc1 = 0x080, |
211 | #define NVREG_MISC1_PAUSE_TX 0x01 | ||
206 | #define NVREG_MISC1_HD 0x02 | 212 | #define NVREG_MISC1_HD 0x02 |
207 | #define NVREG_MISC1_FORCE 0x3b0f3c | 213 | #define NVREG_MISC1_FORCE 0x3b0f3c |
208 | 214 | ||
@@ -214,9 +220,11 @@ enum { | |||
214 | #define NVREG_XMITSTAT_BUSY 0x01 | 220 | #define NVREG_XMITSTAT_BUSY 0x01 |
215 | 221 | ||
216 | NvRegPacketFilterFlags = 0x8c, | 222 | NvRegPacketFilterFlags = 0x8c, |
217 | #define NVREG_PFF_ALWAYS 0x7F0008 | 223 | #define NVREG_PFF_PAUSE_RX 0x08 |
224 | #define NVREG_PFF_ALWAYS 0x7F0000 | ||
218 | #define NVREG_PFF_PROMISC 0x80 | 225 | #define NVREG_PFF_PROMISC 0x80 |
219 | #define NVREG_PFF_MYADDR 0x20 | 226 | #define NVREG_PFF_MYADDR 0x20 |
227 | #define NVREG_PFF_LOOPBACK 0x10 | ||
220 | 228 | ||
221 | NvRegOffloadConfig = 0x90, | 229 | NvRegOffloadConfig = 0x90, |
222 | #define NVREG_OFFLOAD_HOMEPHY 0x601 | 230 | #define NVREG_OFFLOAD_HOMEPHY 0x601 |
@@ -277,6 +285,9 @@ enum { | |||
277 | #define NVREG_TXRXCTL_VLANINS 0x00080 | 285 | #define NVREG_TXRXCTL_VLANINS 0x00080 |
278 | NvRegTxRingPhysAddrHigh = 0x148, | 286 | NvRegTxRingPhysAddrHigh = 0x148, |
279 | NvRegRxRingPhysAddrHigh = 0x14C, | 287 | NvRegRxRingPhysAddrHigh = 0x14C, |
288 | NvRegTxPauseFrame = 0x170, | ||
289 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 | ||
290 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 | ||
280 | NvRegMIIStatus = 0x180, | 291 | NvRegMIIStatus = 0x180, |
281 | #define NVREG_MIISTAT_ERROR 0x0001 | 292 | #define NVREG_MIISTAT_ERROR 0x0001 |
282 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 293 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
@@ -326,6 +337,33 @@ enum { | |||
326 | #define NVREG_POWERSTATE_D1 0x0001 | 337 | #define NVREG_POWERSTATE_D1 0x0001 |
327 | #define NVREG_POWERSTATE_D2 0x0002 | 338 | #define NVREG_POWERSTATE_D2 0x0002 |
328 | #define NVREG_POWERSTATE_D3 0x0003 | 339 | #define NVREG_POWERSTATE_D3 0x0003 |
340 | NvRegTxCnt = 0x280, | ||
341 | NvRegTxZeroReXmt = 0x284, | ||
342 | NvRegTxOneReXmt = 0x288, | ||
343 | NvRegTxManyReXmt = 0x28c, | ||
344 | NvRegTxLateCol = 0x290, | ||
345 | NvRegTxUnderflow = 0x294, | ||
346 | NvRegTxLossCarrier = 0x298, | ||
347 | NvRegTxExcessDef = 0x29c, | ||
348 | NvRegTxRetryErr = 0x2a0, | ||
349 | NvRegRxFrameErr = 0x2a4, | ||
350 | NvRegRxExtraByte = 0x2a8, | ||
351 | NvRegRxLateCol = 0x2ac, | ||
352 | NvRegRxRunt = 0x2b0, | ||
353 | NvRegRxFrameTooLong = 0x2b4, | ||
354 | NvRegRxOverflow = 0x2b8, | ||
355 | NvRegRxFCSErr = 0x2bc, | ||
356 | NvRegRxFrameAlignErr = 0x2c0, | ||
357 | NvRegRxLenErr = 0x2c4, | ||
358 | NvRegRxUnicast = 0x2c8, | ||
359 | NvRegRxMulticast = 0x2cc, | ||
360 | NvRegRxBroadcast = 0x2d0, | ||
361 | NvRegTxDef = 0x2d4, | ||
362 | NvRegTxFrame = 0x2d8, | ||
363 | NvRegRxCnt = 0x2dc, | ||
364 | NvRegTxPause = 0x2e0, | ||
365 | NvRegRxPause = 0x2e4, | ||
366 | NvRegRxDropFrame = 0x2e8, | ||
329 | NvRegVlanControl = 0x300, | 367 | NvRegVlanControl = 0x300, |
330 | #define NVREG_VLANCONTROL_ENABLE 0x2000 | 368 | #define NVREG_VLANCONTROL_ENABLE 0x2000 |
331 | NvRegMSIXMap0 = 0x3e0, | 369 | NvRegMSIXMap0 = 0x3e0, |
@@ -449,16 +487,18 @@ typedef union _ring_type { | |||
449 | /* General driver defaults */ | 487 | /* General driver defaults */ |
450 | #define NV_WATCHDOG_TIMEO (5*HZ) | 488 | #define NV_WATCHDOG_TIMEO (5*HZ) |
451 | 489 | ||
452 | #define RX_RING 128 | 490 | #define RX_RING_DEFAULT 128 |
453 | #define TX_RING 256 | 491 | #define TX_RING_DEFAULT 256 |
454 | /* | 492 | #define RX_RING_MIN 128 |
455 | * If your nic mysteriously hangs then try to reduce the limits | 493 | #define TX_RING_MIN 64 |
456 | * to 1/0: It might be required to set NV_TX_LASTPACKET in the | 494 | #define RING_MAX_DESC_VER_1 1024 |
457 | * last valid ring entry. But this would be impossible to | 495 | #define RING_MAX_DESC_VER_2_3 16384 |
458 | * implement - probably a disassembly error. | 496 | /* |
497 | * Difference between the get and put pointers for the tx ring. | ||
498 | * This is used to throttle the amount of data outstanding in the | ||
499 | * tx ring. | ||
459 | */ | 500 | */ |
460 | #define TX_LIMIT_STOP 255 | 501 | #define TX_LIMIT_DIFFERENCE 1 |
461 | #define TX_LIMIT_START 254 | ||
462 | 502 | ||
463 | /* rx/tx mac addr + type + vlan + align + slack*/ | 503 | /* rx/tx mac addr + type + vlan + align + slack*/ |
464 | #define NV_RX_HEADERS (64) | 504 | #define NV_RX_HEADERS (64) |
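The fixed RX_RING/TX_RING sizes become per-device defaults with min/max bounds, and the old TX_LIMIT_STOP/TX_LIMIT_START pair collapses into a single TX_LIMIT_DIFFERENCE applied against the runtime ring size. The driver's next_tx/nic_tx counters are free-running unsigned ints, so occupancy is their plain difference, which stays correct across wraparound. A standalone sketch of that arithmetic (deriving tx_limit_stop from TX_LIMIT_DIFFERENCE is an assumption; this hunk does not show the derivation):

#include <assert.h>

/* Free-running unsigned put/get counters: occupancy is the plain
 * difference, correct even when the counters wrap. */
static unsigned int tx_in_flight(unsigned int next_tx, unsigned int nic_tx)
{
	return next_tx - nic_tx;
}

int main(void)
{
	unsigned int tx_ring_size = 256;	/* TX_RING_DEFAULT */
	/* assumed: tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE */
	unsigned int tx_limit_stop = tx_ring_size - 1;

	assert(tx_in_flight(0, 0) == 0);
	/* wrapped: put counter passed UINT_MAX, get counter has not */
	assert(tx_in_flight(3, 0xfffffffeu) == 5);
	assert(tx_in_flight(tx_limit_stop, 0) < tx_ring_size);
	return 0;
}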
@@ -472,8 +512,9 @@ typedef union _ring_type { | |||
472 | #define OOM_REFILL (1+HZ/20) | 512 | #define OOM_REFILL (1+HZ/20) |
473 | #define POLL_WAIT (1+HZ/100) | 513 | #define POLL_WAIT (1+HZ/100) |
474 | #define LINK_TIMEOUT (3*HZ) | 514 | #define LINK_TIMEOUT (3*HZ) |
515 | #define STATS_INTERVAL (10*HZ) | ||
475 | 516 | ||
476 | /* | 517 | /* |
477 | * desc_ver values: | 518 | * desc_ver values: |
478 | * The nic supports three different descriptor types: | 519 | * The nic supports three different descriptor types: |
479 | * - DESC_VER_1: Original | 520 | * - DESC_VER_1: Original |
@@ -506,13 +547,13 @@ typedef union _ring_type { | |||
506 | #define PHY_1000 0x2 | 547 | #define PHY_1000 0x2 |
507 | #define PHY_HALF 0x100 | 548 | #define PHY_HALF 0x100 |
508 | 549 | ||
509 | /* FIXME: MII defines that should be added to <linux/mii.h> */ | 550 | #define NV_PAUSEFRAME_RX_CAPABLE 0x0001 |
510 | #define MII_1000BT_CR 0x09 | 551 | #define NV_PAUSEFRAME_TX_CAPABLE 0x0002 |
511 | #define MII_1000BT_SR 0x0a | 552 | #define NV_PAUSEFRAME_RX_ENABLE 0x0004 |
512 | #define ADVERTISE_1000FULL 0x0200 | 553 | #define NV_PAUSEFRAME_TX_ENABLE 0x0008 |
513 | #define ADVERTISE_1000HALF 0x0100 | 554 | #define NV_PAUSEFRAME_RX_REQ 0x0010 |
514 | #define LPA_1000FULL 0x0800 | 555 | #define NV_PAUSEFRAME_TX_REQ 0x0020 |
515 | #define LPA_1000HALF 0x0400 | 556 | #define NV_PAUSEFRAME_AUTONEG 0x0040 |
516 | 557 | ||
517 | /* MSI/MSI-X defines */ | 558 | /* MSI/MSI-X defines */ |
518 | #define NV_MSI_X_MAX_VECTORS 8 | 559 | #define NV_MSI_X_MAX_VECTORS 8 |
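The new NV_PAUSEFRAME_* bits separate three concerns: what the hardware can do (*_CAPABLE), what was requested (*_REQ), and what is currently active (*_ENABLE), plus NV_PAUSEFRAME_AUTONEG to let autonegotiation decide. A minimal sketch of how such a three-layer flag word behaves (the helper below is illustrative, not code from the patch):

#include <stdio.h>

#define RX_CAPABLE 0x0001
#define TX_CAPABLE 0x0002
#define RX_ENABLE  0x0004
#define TX_ENABLE  0x0008
#define RX_REQ     0x0010
#define TX_REQ     0x0020

/* Only directions that are both requested and supported become active. */
static unsigned int resolve(unsigned int flags)
{
	flags &= ~(RX_ENABLE | TX_ENABLE);
	if ((flags & RX_REQ) && (flags & RX_CAPABLE))
		flags |= RX_ENABLE;
	if ((flags & TX_REQ) && (flags & TX_CAPABLE))
		flags |= TX_ENABLE;
	return flags;
}

int main(void)
{
	/* rx-capable hardware, rx and tx requested: only rx is enabled */
	printf("0x%04x\n", resolve(RX_CAPABLE | TX_REQ | RX_REQ)); /* 0x0035 */
	return 0;
}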
@@ -527,6 +568,101 @@ typedef union _ring_type { | |||
527 | #define NV_MSI_X_VECTOR_TX 0x1 | 568 | #define NV_MSI_X_VECTOR_TX 0x1 |
528 | #define NV_MSI_X_VECTOR_OTHER 0x2 | 569 | #define NV_MSI_X_VECTOR_OTHER 0x2 |
529 | 570 | ||
571 | /* statistics */ | ||
572 | struct nv_ethtool_str { | ||
573 | char name[ETH_GSTRING_LEN]; | ||
574 | }; | ||
575 | |||
576 | static const struct nv_ethtool_str nv_estats_str[] = { | ||
577 | { "tx_bytes" }, | ||
578 | { "tx_zero_rexmt" }, | ||
579 | { "tx_one_rexmt" }, | ||
580 | { "tx_many_rexmt" }, | ||
581 | { "tx_late_collision" }, | ||
582 | { "tx_fifo_errors" }, | ||
583 | { "tx_carrier_errors" }, | ||
584 | { "tx_excess_deferral" }, | ||
585 | { "tx_retry_error" }, | ||
586 | { "tx_deferral" }, | ||
587 | { "tx_packets" }, | ||
588 | { "tx_pause" }, | ||
589 | { "rx_frame_error" }, | ||
590 | { "rx_extra_byte" }, | ||
591 | { "rx_late_collision" }, | ||
592 | { "rx_runt" }, | ||
593 | { "rx_frame_too_long" }, | ||
594 | { "rx_over_errors" }, | ||
595 | { "rx_crc_errors" }, | ||
596 | { "rx_frame_align_error" }, | ||
597 | { "rx_length_error" }, | ||
598 | { "rx_unicast" }, | ||
599 | { "rx_multicast" }, | ||
600 | { "rx_broadcast" }, | ||
601 | { "rx_bytes" }, | ||
602 | { "rx_pause" }, | ||
603 | { "rx_drop_frame" }, | ||
604 | { "rx_packets" }, | ||
605 | { "rx_errors_total" } | ||
606 | }; | ||
607 | |||
608 | struct nv_ethtool_stats { | ||
609 | u64 tx_bytes; | ||
610 | u64 tx_zero_rexmt; | ||
611 | u64 tx_one_rexmt; | ||
612 | u64 tx_many_rexmt; | ||
613 | u64 tx_late_collision; | ||
614 | u64 tx_fifo_errors; | ||
615 | u64 tx_carrier_errors; | ||
616 | u64 tx_excess_deferral; | ||
617 | u64 tx_retry_error; | ||
618 | u64 tx_deferral; | ||
619 | u64 tx_packets; | ||
620 | u64 tx_pause; | ||
621 | u64 rx_frame_error; | ||
622 | u64 rx_extra_byte; | ||
623 | u64 rx_late_collision; | ||
624 | u64 rx_runt; | ||
625 | u64 rx_frame_too_long; | ||
626 | u64 rx_over_errors; | ||
627 | u64 rx_crc_errors; | ||
628 | u64 rx_frame_align_error; | ||
629 | u64 rx_length_error; | ||
630 | u64 rx_unicast; | ||
631 | u64 rx_multicast; | ||
632 | u64 rx_broadcast; | ||
633 | u64 rx_bytes; | ||
634 | u64 rx_pause; | ||
635 | u64 rx_drop_frame; | ||
636 | u64 rx_packets; | ||
637 | u64 rx_errors_total; | ||
638 | }; | ||
639 | |||
640 | /* diagnostics */ | ||
641 | #define NV_TEST_COUNT_BASE 3 | ||
642 | #define NV_TEST_COUNT_EXTENDED 4 | ||
643 | |||
644 | static const struct nv_ethtool_str nv_etests_str[] = { | ||
645 | { "link (online/offline)" }, | ||
646 | { "register (offline) " }, | ||
647 | { "interrupt (offline) " }, | ||
648 | { "loopback (offline) " } | ||
649 | }; | ||
650 | |||
651 | struct register_test { | ||
652 | u32 reg; | ||
653 | u32 mask; | ||
654 | }; | ||
655 | |||
656 | static const struct register_test nv_registers_test[] = { | ||
657 | { NvRegUnknownSetupReg6, 0x01 }, | ||
658 | { NvRegMisc1, 0x03c }, | ||
659 | { NvRegOffloadConfig, 0x03ff }, | ||
660 | { NvRegMulticastAddrA, 0xffffffff }, | ||
661 | { NvRegUnknownSetupReg3, 0x0ff }, | ||
662 | { NvRegWakeUpFlags, 0x07777 }, | ||
663 | { 0,0 } | ||
664 | }; | ||
665 | |||
530 | /* | 666 | /* |
531 | * SMP locking: | 667 | * SMP locking: |
532 | * All hardware access under dev->priv->lock, except the performance | 668 | * All hardware access under dev->priv->lock, except the performance |
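The string table and struct nv_ethtool_stats above are parallel arrays: ethtool reports statistics by index, so entry i of nv_estats_str must label field i of the struct. A compile-time sketch of the invariant this layout relies on (the assumption is that the driver's get_ethtool_stats handler hands the struct to ethtool as a flat array of u64):

#include <stdint.h>

#define ETH_GSTRING_LEN 32

struct nv_ethtool_str { char name[ETH_GSTRING_LEN]; };

/* trimmed to three entries for the sketch */
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, { "tx_zero_rexmt" }, { "tx_one_rexmt" },
};

struct nv_ethtool_stats { uint64_t tx_bytes, tx_zero_rexmt, tx_one_rexmt; };

/* fails to compile if the counts ever diverge: one u64 per name */
typedef char order_check[
	sizeof(struct nv_ethtool_stats) / sizeof(uint64_t) ==
	sizeof(nv_estats_str) / sizeof(nv_estats_str[0]) ? 1 : -1];

int main(void) { return 0; }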
@@ -545,6 +681,7 @@ struct fe_priv { | |||
545 | /* General data: | 681 | /* General data: |
546 | * Locking: spin_lock(&np->lock); */ | 682 | * Locking: spin_lock(&np->lock); */ |
547 | struct net_device_stats stats; | 683 | struct net_device_stats stats; |
684 | struct nv_ethtool_stats estats; | ||
548 | int in_shutdown; | 685 | int in_shutdown; |
549 | u32 linkspeed; | 686 | u32 linkspeed; |
550 | int duplex; | 687 | int duplex; |
@@ -554,6 +691,7 @@ struct fe_priv { | |||
554 | int wolenabled; | 691 | int wolenabled; |
555 | unsigned int phy_oui; | 692 | unsigned int phy_oui; |
556 | u16 gigabit; | 693 | u16 gigabit; |
694 | int intr_test; | ||
557 | 695 | ||
558 | /* General data: RO fields */ | 696 | /* General data: RO fields */ |
559 | dma_addr_t ring_addr; | 697 | dma_addr_t ring_addr; |
@@ -573,13 +711,15 @@ struct fe_priv { | |||
573 | */ | 711 | */ |
574 | ring_type rx_ring; | 712 | ring_type rx_ring; |
575 | unsigned int cur_rx, refill_rx; | 713 | unsigned int cur_rx, refill_rx; |
576 | struct sk_buff *rx_skbuff[RX_RING]; | 714 | struct sk_buff **rx_skbuff; |
577 | dma_addr_t rx_dma[RX_RING]; | 715 | dma_addr_t *rx_dma; |
578 | unsigned int rx_buf_sz; | 716 | unsigned int rx_buf_sz; |
579 | unsigned int pkt_limit; | 717 | unsigned int pkt_limit; |
580 | struct timer_list oom_kick; | 718 | struct timer_list oom_kick; |
581 | struct timer_list nic_poll; | 719 | struct timer_list nic_poll; |
720 | struct timer_list stats_poll; | ||
582 | u32 nic_poll_irq; | 721 | u32 nic_poll_irq; |
722 | int rx_ring_size; | ||
583 | 723 | ||
584 | /* media detection workaround. | 724 | /* media detection workaround. |
585 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); | 725 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); |
@@ -591,10 +731,13 @@ struct fe_priv { | |||
591 | */ | 731 | */ |
592 | ring_type tx_ring; | 732 | ring_type tx_ring; |
593 | unsigned int next_tx, nic_tx; | 733 | unsigned int next_tx, nic_tx; |
594 | struct sk_buff *tx_skbuff[TX_RING]; | 734 | struct sk_buff **tx_skbuff; |
595 | dma_addr_t tx_dma[TX_RING]; | 735 | dma_addr_t *tx_dma; |
596 | unsigned int tx_dma_len[TX_RING]; | 736 | unsigned int *tx_dma_len; |
597 | u32 tx_flags; | 737 | u32 tx_flags; |
738 | int tx_ring_size; | ||
739 | int tx_limit_start; | ||
740 | int tx_limit_stop; | ||
598 | 741 | ||
599 | /* vlan fields */ | 742 | /* vlan fields */ |
600 | struct vlan_group *vlangrp; | 743 | struct vlan_group *vlangrp; |
@@ -602,6 +745,9 @@ struct fe_priv { | |||
602 | /* msi/msi-x fields */ | 745 | /* msi/msi-x fields */ |
603 | u32 msi_flags; | 746 | u32 msi_flags; |
604 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | 747 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; |
748 | |||
749 | /* flow control */ | ||
750 | u32 pause_flags; | ||
605 | }; | 751 | }; |
606 | 752 | ||
607 | /* | 753 | /* |
@@ -612,12 +758,14 @@ static int max_interrupt_work = 5; | |||
612 | 758 | ||
613 | /* | 759 | /* |
614 | * Optimization can be either throughput mode or cpu mode | 760 | * Optimization can be either throughput mode or cpu mode |
615 | * | 761 | * |
616 | * Throughput Mode: Every tx and rx packet will generate an interrupt. | 762 | * Throughput Mode: Every tx and rx packet will generate an interrupt. |
617 | * CPU Mode: Interrupts are controlled by a timer. | 763 | * CPU Mode: Interrupts are controlled by a timer. |
618 | */ | 764 | */ |
619 | #define NV_OPTIMIZATION_MODE_THROUGHPUT 0 | 765 | enum { |
620 | #define NV_OPTIMIZATION_MODE_CPU 1 | 766 | NV_OPTIMIZATION_MODE_THROUGHPUT, |
767 | NV_OPTIMIZATION_MODE_CPU | ||
768 | }; | ||
621 | static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; | 769 | static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; |
622 | 770 | ||
623 | /* | 771 | /* |
@@ -630,14 +778,31 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; | |||
630 | static int poll_interval = -1; | 778 | static int poll_interval = -1; |
631 | 779 | ||
632 | /* | 780 | /* |
633 | * Disable MSI interrupts | 781 | * MSI interrupts |
782 | */ | ||
783 | enum { | ||
784 | NV_MSI_INT_DISABLED, | ||
785 | NV_MSI_INT_ENABLED | ||
786 | }; | ||
787 | static int msi = NV_MSI_INT_ENABLED; | ||
788 | |||
789 | /* | ||
790 | * MSIX interrupts | ||
634 | */ | 791 | */ |
635 | static int disable_msi = 0; | 792 | enum { |
793 | NV_MSIX_INT_DISABLED, | ||
794 | NV_MSIX_INT_ENABLED | ||
795 | }; | ||
796 | static int msix = NV_MSIX_INT_ENABLED; | ||
636 | 797 | ||
637 | /* | 798 | /* |
638 | * Disable MSIX interrupts | 799 | * DMA 64bit |
639 | */ | 800 | */ |
640 | static int disable_msix = 0; | 801 | enum { |
802 | NV_DMA_64BIT_DISABLED, | ||
803 | NV_DMA_64BIT_ENABLED | ||
804 | }; | ||
805 | static int dma_64bit = NV_DMA_64BIT_ENABLED; | ||
641 | 806 | ||
642 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) | 807 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
643 | { | 808 | { |
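The negative disable_msi/disable_msix knobs become positive msi/msix switches, with dma_64bit added alongside. This hunk only shows the variables; presumably they are registered with module_param() elsewhere in the patch, along the lines of this sketch (the parameter descriptions are illustrative, not quoted):

#include <linux/module.h>

static int msi = 1;		/* NV_MSI_INT_ENABLED */
static int msix = 1;		/* NV_MSIX_INT_ENABLED */
static int dma_64bit = 1;	/* NV_DMA_64BIT_ENABLED */

module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts: enabled (1) or disabled (0)");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts: enabled (1) or disabled (0)");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "64-bit DMA: enabled (1) or disabled (0)");

MODULE_LICENSE("GPL");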
@@ -697,7 +862,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |||
697 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | 862 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); |
698 | } | 863 | } |
699 | if (rxtx_flags & NV_SETUP_TX_RING) { | 864 | if (rxtx_flags & NV_SETUP_TX_RING) { |
700 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); | 865 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); |
701 | } | 866 | } |
702 | } else { | 867 | } else { |
703 | if (rxtx_flags & NV_SETUP_RX_RING) { | 868 | if (rxtx_flags & NV_SETUP_RX_RING) { |
@@ -705,12 +870,37 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |||
705 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | 870 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); |
706 | } | 871 | } |
707 | if (rxtx_flags & NV_SETUP_TX_RING) { | 872 | if (rxtx_flags & NV_SETUP_TX_RING) { |
708 | writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); | 873 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); |
709 | writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | 874 | writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); |
710 | } | 875 | } |
711 | } | 876 | } |
712 | } | 877 | } |
713 | 878 | ||
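setup_hw_rings now derives the tx ring base from the runtime rx_ring_size instead of the fixed RX_RING: both rings live in one DMA allocation, rx first and tx immediately after, and on the extended descriptor format the address is programmed as two 32-bit register writes. A standalone sketch of that address math (the descriptor size of four 32-bit words is an assumption about struct ring_desc_ex):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_addr = 0x1f0001000ULL;	/* hypothetical DMA base */
	unsigned int rx_ring_size = 128;	/* RX_RING_DEFAULT */
	size_t desc_size = 4 * sizeof(uint32_t); /* assumed descriptor size */

	uint64_t tx_base = ring_addr + (uint64_t)rx_ring_size * desc_size;

	/* split into the two 32-bit writes used for the high/low registers */
	printf("NvRegTxRingPhysAddr     = 0x%08x\n", (uint32_t)tx_base);
	printf("NvRegTxRingPhysAddrHigh = 0x%08x\n", (uint32_t)(tx_base >> 32));
	return 0;
}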
879 | static void free_rings(struct net_device *dev) | ||
880 | { | ||
881 | struct fe_priv *np = get_nvpriv(dev); | ||
882 | |||
883 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
884 | if(np->rx_ring.orig) | ||
885 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), | ||
886 | np->rx_ring.orig, np->ring_addr); | ||
887 | } else { | ||
888 | if (np->rx_ring.ex) | ||
889 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), | ||
890 | np->rx_ring.ex, np->ring_addr); | ||
891 | } | ||
892 | if (np->rx_skbuff) | ||
893 | kfree(np->rx_skbuff); | ||
894 | if (np->rx_dma) | ||
895 | kfree(np->rx_dma); | ||
896 | if (np->tx_skbuff) | ||
897 | kfree(np->tx_skbuff); | ||
898 | if (np->tx_dma) | ||
899 | kfree(np->tx_dma); | ||
900 | if (np->tx_dma_len) | ||
901 | kfree(np->tx_dma_len); | ||
902 | } | ||
903 | |||
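free_rings() checks every pointer before freeing it, so it is safe to call on a partially constructed state; that lets any allocation path bail out with a single teardown call. A userspace sketch of the same construct-or-unwind shape (names are illustrative):

#include <stdlib.h>

struct rings {
	void *desc;		/* stands in for the DMA-coherent block */
	void **rx_skbuff;
	void **tx_skbuff;
};

/* NULL-tolerant teardown, same shape as free_rings(): free() ignores
 * NULL, just as the pointer checks above make kfree/pci_free_consistent
 * calls safe on a half-built object. */
static void rings_free(struct rings *r)
{
	free(r->desc);
	free(r->rx_skbuff);
	free(r->tx_skbuff);
}

static int rings_alloc(struct rings *r, size_t rx, size_t tx)
{
	r->desc = calloc(rx + tx, 16);
	r->rx_skbuff = calloc(rx, sizeof(void *));
	r->tx_skbuff = calloc(tx, sizeof(void *));
	if (!r->desc || !r->rx_skbuff || !r->tx_skbuff) {
		rings_free(r);	/* frees whatever did get allocated */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct rings r = { 0 };

	if (rings_alloc(&r, 128, 256))
		return 1;
	rings_free(&r);
	return 0;
}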
714 | static int using_multi_irqs(struct net_device *dev) | 904 | static int using_multi_irqs(struct net_device *dev) |
715 | { | 905 | { |
716 | struct fe_priv *np = get_nvpriv(dev); | 906 | struct fe_priv *np = get_nvpriv(dev); |
@@ -860,7 +1050,7 @@ static int phy_init(struct net_device *dev) | |||
860 | 1050 | ||
861 | /* set advertise register */ | 1051 | /* set advertise register */ |
862 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 1052 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
863 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400); | 1053 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); |
864 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { | 1054 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { |
865 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); | 1055 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); |
866 | return PHY_ERROR; | 1056 | return PHY_ERROR; |
@@ -873,14 +1063,14 @@ static int phy_init(struct net_device *dev) | |||
873 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | 1063 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); |
874 | if (mii_status & PHY_GIGABIT) { | 1064 | if (mii_status & PHY_GIGABIT) { |
875 | np->gigabit = PHY_GIGABIT; | 1065 | np->gigabit = PHY_GIGABIT; |
876 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 1066 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
877 | mii_control_1000 &= ~ADVERTISE_1000HALF; | 1067 | mii_control_1000 &= ~ADVERTISE_1000HALF; |
878 | if (phyinterface & PHY_RGMII) | 1068 | if (phyinterface & PHY_RGMII) |
879 | mii_control_1000 |= ADVERTISE_1000FULL; | 1069 | mii_control_1000 |= ADVERTISE_1000FULL; |
880 | else | 1070 | else |
881 | mii_control_1000 &= ~ADVERTISE_1000FULL; | 1071 | mii_control_1000 &= ~ADVERTISE_1000FULL; |
882 | 1072 | ||
883 | if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) { | 1073 | if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { |
884 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | 1074 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); |
885 | return PHY_ERROR; | 1075 | return PHY_ERROR; |
886 | } | 1076 | } |
@@ -918,6 +1108,8 @@ static int phy_init(struct net_device *dev) | |||
918 | return PHY_ERROR; | 1108 | return PHY_ERROR; |
919 | } | 1109 | } |
920 | } | 1110 | } |
1111 | /* some phys clear out pause advertisement on reset, set it back */ ||
1112 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); | ||
921 | 1113 | ||
922 | /* restart auto negotiation */ | 1114 | /* restart auto negotiation */ |
923 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 1115 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
@@ -1047,7 +1239,7 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1047 | while (np->cur_rx != refill_rx) { | 1239 | while (np->cur_rx != refill_rx) { |
1048 | struct sk_buff *skb; | 1240 | struct sk_buff *skb; |
1049 | 1241 | ||
1050 | nr = refill_rx % RX_RING; | 1242 | nr = refill_rx % np->rx_ring_size; |
1051 | if (np->rx_skbuff[nr] == NULL) { | 1243 | if (np->rx_skbuff[nr] == NULL) { |
1052 | 1244 | ||
1053 | skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | 1245 | skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
@@ -1076,7 +1268,7 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1076 | refill_rx++; | 1268 | refill_rx++; |
1077 | } | 1269 | } |
1078 | np->refill_rx = refill_rx; | 1270 | np->refill_rx = refill_rx; |
1079 | if (np->cur_rx - refill_rx == RX_RING) | 1271 | if (np->cur_rx - refill_rx == np->rx_ring_size) |
1080 | return 1; | 1272 | return 1; |
1081 | return 0; | 1273 | return 0; |
1082 | } | 1274 | } |
@@ -1110,14 +1302,14 @@ static void nv_do_rx_refill(unsigned long data) | |||
1110 | } | 1302 | } |
1111 | } | 1303 | } |
1112 | 1304 | ||
1113 | static void nv_init_rx(struct net_device *dev) | 1305 | static void nv_init_rx(struct net_device *dev) |
1114 | { | 1306 | { |
1115 | struct fe_priv *np = netdev_priv(dev); | 1307 | struct fe_priv *np = netdev_priv(dev); |
1116 | int i; | 1308 | int i; |
1117 | 1309 | ||
1118 | np->cur_rx = RX_RING; | 1310 | np->cur_rx = np->rx_ring_size; |
1119 | np->refill_rx = 0; | 1311 | np->refill_rx = 0; |
1120 | for (i = 0; i < RX_RING; i++) | 1312 | for (i = 0; i < np->rx_ring_size; i++) |
1121 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1313 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1122 | np->rx_ring.orig[i].FlagLen = 0; | 1314 | np->rx_ring.orig[i].FlagLen = 0; |
1123 | else | 1315 | else |
@@ -1130,7 +1322,7 @@ static void nv_init_tx(struct net_device *dev) | |||
1130 | int i; | 1322 | int i; |
1131 | 1323 | ||
1132 | np->next_tx = np->nic_tx = 0; | 1324 | np->next_tx = np->nic_tx = 0; |
1133 | for (i = 0; i < TX_RING; i++) { | 1325 | for (i = 0; i < np->tx_ring_size; i++) { |
1134 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1326 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1135 | np->tx_ring.orig[i].FlagLen = 0; | 1327 | np->tx_ring.orig[i].FlagLen = 0; |
1136 | else | 1328 | else |
@@ -1174,8 +1366,8 @@ static void nv_drain_tx(struct net_device *dev) | |||
1174 | { | 1366 | { |
1175 | struct fe_priv *np = netdev_priv(dev); | 1367 | struct fe_priv *np = netdev_priv(dev); |
1176 | unsigned int i; | 1368 | unsigned int i; |
1177 | 1369 | ||
1178 | for (i = 0; i < TX_RING; i++) { | 1370 | for (i = 0; i < np->tx_ring_size; i++) { |
1179 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1371 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1180 | np->tx_ring.orig[i].FlagLen = 0; | 1372 | np->tx_ring.orig[i].FlagLen = 0; |
1181 | else | 1373 | else |
@@ -1189,7 +1381,7 @@ static void nv_drain_rx(struct net_device *dev) | |||
1189 | { | 1381 | { |
1190 | struct fe_priv *np = netdev_priv(dev); | 1382 | struct fe_priv *np = netdev_priv(dev); |
1191 | int i; | 1383 | int i; |
1192 | for (i = 0; i < RX_RING; i++) { | 1384 | for (i = 0; i < np->rx_ring_size; i++) { |
1193 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1385 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1194 | np->rx_ring.orig[i].FlagLen = 0; | 1386 | np->rx_ring.orig[i].FlagLen = 0; |
1195 | else | 1387 | else |
@@ -1221,8 +1413,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1221 | u32 tx_flags = 0; | 1413 | u32 tx_flags = 0; |
1222 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | 1414 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
1223 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | 1415 | unsigned int fragments = skb_shinfo(skb)->nr_frags; |
1224 | unsigned int nr = (np->next_tx - 1) % TX_RING; | 1416 | unsigned int nr = (np->next_tx - 1) % np->tx_ring_size; |
1225 | unsigned int start_nr = np->next_tx % TX_RING; | 1417 | unsigned int start_nr = np->next_tx % np->tx_ring_size; |
1226 | unsigned int i; | 1418 | unsigned int i; |
1227 | u32 offset = 0; | 1419 | u32 offset = 0; |
1228 | u32 bcnt; | 1420 | u32 bcnt; |
@@ -1238,7 +1430,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1238 | 1430 | ||
1239 | spin_lock_irq(&np->lock); | 1431 | spin_lock_irq(&np->lock); |
1240 | 1432 | ||
1241 | if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) { | 1433 | if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { |
1242 | spin_unlock_irq(&np->lock); | 1434 | spin_unlock_irq(&np->lock); |
1243 | netif_stop_queue(dev); | 1435 | netif_stop_queue(dev); |
1244 | return NETDEV_TX_BUSY; | 1436 | return NETDEV_TX_BUSY; |
@@ -1247,7 +1439,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1247 | /* setup the header buffer */ | 1439 | /* setup the header buffer */ |
1248 | do { | 1440 | do { |
1249 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 1441 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
1250 | nr = (nr + 1) % TX_RING; | 1442 | nr = (nr + 1) % np->tx_ring_size; |
1251 | 1443 | ||
1252 | np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | 1444 | np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, |
1253 | PCI_DMA_TODEVICE); | 1445 | PCI_DMA_TODEVICE); |
@@ -1274,7 +1466,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1274 | 1466 | ||
1275 | do { | 1467 | do { |
1276 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 1468 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
1277 | nr = (nr + 1) % TX_RING; | 1469 | nr = (nr + 1) % np->tx_ring_size; |
1278 | 1470 | ||
1279 | np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | 1471 | np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, |
1280 | PCI_DMA_TODEVICE); | 1472 | PCI_DMA_TODEVICE); |
@@ -1320,7 +1512,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1320 | } else { | 1512 | } else { |
1321 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); | 1513 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); |
1322 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1514 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1323 | } | 1515 | } |
1324 | 1516 | ||
1325 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", | 1517 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", |
1326 | dev->name, np->next_tx, entries, tx_flags_extra); | 1518 | dev->name, np->next_tx, entries, tx_flags_extra); |
@@ -1356,7 +1548,7 @@ static void nv_tx_done(struct net_device *dev) | |||
1356 | struct sk_buff *skb; | 1548 | struct sk_buff *skb; |
1357 | 1549 | ||
1358 | while (np->nic_tx != np->next_tx) { | 1550 | while (np->nic_tx != np->next_tx) { |
1359 | i = np->nic_tx % TX_RING; | 1551 | i = np->nic_tx % np->tx_ring_size; |
1360 | 1552 | ||
1361 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1553 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1362 | Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); | 1554 | Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); |
@@ -1395,13 +1587,13 @@ static void nv_tx_done(struct net_device *dev) | |||
1395 | } else { | 1587 | } else { |
1396 | np->stats.tx_packets++; | 1588 | np->stats.tx_packets++; |
1397 | np->stats.tx_bytes += skb->len; | 1589 | np->stats.tx_bytes += skb->len; |
1398 | } | 1590 | } |
1399 | } | 1591 | } |
1400 | } | 1592 | } |
1401 | nv_release_txskb(dev, i); | 1593 | nv_release_txskb(dev, i); |
1402 | np->nic_tx++; | 1594 | np->nic_tx++; |
1403 | } | 1595 | } |
1404 | if (np->next_tx - np->nic_tx < TX_LIMIT_START) | 1596 | if (np->next_tx - np->nic_tx < np->tx_limit_start) |
1405 | netif_wake_queue(dev); | 1597 | netif_wake_queue(dev); |
1406 | } | 1598 | } |
1407 | 1599 | ||
@@ -1438,10 +1630,10 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1438 | readl(base + i + 24), readl(base + i + 28)); | 1630 | readl(base + i + 24), readl(base + i + 28)); |
1439 | } | 1631 | } |
1440 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); | 1632 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); |
1441 | for (i=0;i<TX_RING;i+= 4) { | 1633 | for (i=0;i<np->tx_ring_size;i+= 4) { |
1442 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1634 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1443 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | 1635 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", |
1444 | i, | 1636 | i, |
1445 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), | 1637 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), |
1446 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), | 1638 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), |
1447 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), | 1639 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), |
@@ -1452,7 +1644,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1452 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); | 1644 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); |
1453 | } else { | 1645 | } else { |
1454 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | 1646 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", |
1455 | i, | 1647 | i, |
1456 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), | 1648 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), |
1457 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), | 1649 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), |
1458 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), | 1650 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), |
@@ -1550,15 +1742,14 @@ static void nv_rx_process(struct net_device *dev) | |||
1550 | u32 Flags; | 1742 | u32 Flags; |
1551 | u32 vlanflags = 0; | 1743 | u32 vlanflags = 0; |
1552 | 1744 | ||
1553 | |||
1554 | for (;;) { | 1745 | for (;;) { |
1555 | struct sk_buff *skb; | 1746 | struct sk_buff *skb; |
1556 | int len; | 1747 | int len; |
1557 | int i; | 1748 | int i; |
1558 | if (np->cur_rx - np->refill_rx >= RX_RING) | 1749 | if (np->cur_rx - np->refill_rx >= np->rx_ring_size) |
1559 | break; /* we scanned the whole ring - do not continue */ | 1750 | break; /* we scanned the whole ring - do not continue */ |
1560 | 1751 | ||
1561 | i = np->cur_rx % RX_RING; | 1752 | i = np->cur_rx % np->rx_ring_size; |
1562 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1753 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1563 | Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); | 1754 | Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); |
1564 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); | 1755 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); |
@@ -1665,14 +1856,16 @@ static void nv_rx_process(struct net_device *dev) | |||
1665 | } | 1856 | } |
1666 | } | 1857 | } |
1667 | } | 1858 | } |
1668 | Flags &= NV_RX2_CHECKSUMMASK; | 1859 | if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { |
1669 | if (Flags == NV_RX2_CHECKSUMOK1 || | 1860 | Flags &= NV_RX2_CHECKSUMMASK; |
1670 | Flags == NV_RX2_CHECKSUMOK2 || | 1861 | if (Flags == NV_RX2_CHECKSUMOK1 || |
1671 | Flags == NV_RX2_CHECKSUMOK3) { | 1862 | Flags == NV_RX2_CHECKSUMOK2 || |
1672 | dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); | 1863 | Flags == NV_RX2_CHECKSUMOK3) { |
1673 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; | 1864 | dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); |
1674 | } else { | 1865 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; |
1675 | dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); | 1866 | } else { |
1867 | dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); | ||
1868 | } | ||
1676 | } | 1869 | } |
1677 | } | 1870 | } |
1678 | /* got a valid packet - forward it to the network core */ | 1871 | /* got a valid packet - forward it to the network core */ |
@@ -1747,18 +1940,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1747 | nv_drain_rx(dev); | 1940 | nv_drain_rx(dev); |
1748 | nv_drain_tx(dev); | 1941 | nv_drain_tx(dev); |
1749 | /* reinit driver view of the rx queue */ | 1942 | /* reinit driver view of the rx queue */ |
1750 | nv_init_rx(dev); | ||
1751 | nv_init_tx(dev); | ||
1752 | /* alloc new rx buffers */ | ||
1753 | set_bufsize(dev); | 1943 | set_bufsize(dev); |
1754 | if (nv_alloc_rx(dev)) { | 1944 | if (nv_init_ring(dev)) { |
1755 | if (!np->in_shutdown) | 1945 | if (!np->in_shutdown) |
1756 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 1946 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
1757 | } | 1947 | } |
1758 | /* reinit nic view of the rx queue */ | 1948 | /* reinit nic view of the rx queue */ |
1759 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | 1949 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1760 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | 1950 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
1761 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 1951 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
1762 | base + NvRegRingSizes); | 1952 | base + NvRegRingSizes); |
1763 | pci_push(base); | 1953 | pci_push(base); |
1764 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | 1954 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
@@ -1832,16 +2022,16 @@ static void nv_set_multicast(struct net_device *dev) | |||
1832 | u8 __iomem *base = get_hwbase(dev); | 2022 | u8 __iomem *base = get_hwbase(dev); |
1833 | u32 addr[2]; | 2023 | u32 addr[2]; |
1834 | u32 mask[2]; | 2024 | u32 mask[2]; |
1835 | u32 pff; | 2025 | u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; |
1836 | 2026 | ||
1837 | memset(addr, 0, sizeof(addr)); | 2027 | memset(addr, 0, sizeof(addr)); |
1838 | memset(mask, 0, sizeof(mask)); | 2028 | memset(mask, 0, sizeof(mask)); |
1839 | 2029 | ||
1840 | if (dev->flags & IFF_PROMISC) { | 2030 | if (dev->flags & IFF_PROMISC) { |
1841 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); | 2031 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); |
1842 | pff = NVREG_PFF_PROMISC; | 2032 | pff |= NVREG_PFF_PROMISC; |
1843 | } else { | 2033 | } else { |
1844 | pff = NVREG_PFF_MYADDR; | 2034 | pff |= NVREG_PFF_MYADDR; |
1845 | 2035 | ||
1846 | if (dev->flags & IFF_ALLMULTI || dev->mc_list) { | 2036 | if (dev->flags & IFF_ALLMULTI || dev->mc_list) { |
1847 | u32 alwaysOff[2]; | 2037 | u32 alwaysOff[2]; |
@@ -1886,6 +2076,35 @@ static void nv_set_multicast(struct net_device *dev) | |||
1886 | spin_unlock_irq(&np->lock); | 2076 | spin_unlock_irq(&np->lock); |
1887 | } | 2077 | } |
1888 | 2078 | ||
2079 | void nv_update_pause(struct net_device *dev, u32 pause_flags) | ||
2080 | { | ||
2081 | struct fe_priv *np = netdev_priv(dev); | ||
2082 | u8 __iomem *base = get_hwbase(dev); | ||
2083 | |||
2084 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); | ||
2085 | |||
2086 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { | ||
2087 | u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; | ||
2088 | if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { | ||
2089 | writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); | ||
2090 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
2091 | } else { | ||
2092 | writel(pff, base + NvRegPacketFilterFlags); | ||
2093 | } | ||
2094 | } | ||
2095 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { | ||
2096 | u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; | ||
2097 | if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { | ||
2098 | writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); | ||
2099 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | ||
2100 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
2101 | } else { | ||
2102 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | ||
2103 | writel(regmisc, base + NvRegMisc1); | ||
2104 | } | ||
2105 | } | ||
2106 | } | ||
2107 | |||
1889 | /** | 2108 | /** |
1890 | * nv_update_linkspeed: Setup the MAC according to the link partner | 2109 | * nv_update_linkspeed: Setup the MAC according to the link partner |
1891 | * @dev: Network device to be configured | 2110 | * @dev: Network device to be configured |
@@ -1901,12 +2120,14 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1901 | { | 2120 | { |
1902 | struct fe_priv *np = netdev_priv(dev); | 2121 | struct fe_priv *np = netdev_priv(dev); |
1903 | u8 __iomem *base = get_hwbase(dev); | 2122 | u8 __iomem *base = get_hwbase(dev); |
1904 | int adv, lpa; | 2123 | int adv = 0; |
2124 | int lpa = 0; | ||
2125 | int adv_lpa, adv_pause, lpa_pause; | ||
1905 | int newls = np->linkspeed; | 2126 | int newls = np->linkspeed; |
1906 | int newdup = np->duplex; | 2127 | int newdup = np->duplex; |
1907 | int mii_status; | 2128 | int mii_status; |
1908 | int retval = 0; | 2129 | int retval = 0; |
1909 | u32 control_1000, status_1000, phyreg; | 2130 | u32 control_1000, status_1000, phyreg, pause_flags; |
1910 | 2131 | ||
1911 | /* BMSR_LSTATUS is latched, read it twice: | 2132 | /* BMSR_LSTATUS is latched, read it twice: |
1912 | * we want the current value. | 2133 | * we want the current value. |
@@ -1952,10 +2173,15 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1952 | goto set_speed; | 2173 | goto set_speed; |
1953 | } | 2174 | } |
1954 | 2175 | ||
2176 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | ||
2177 | lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); | ||
2178 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", | ||
2179 | dev->name, adv, lpa); | ||
2180 | |||
1955 | retval = 1; | 2181 | retval = 1; |
1956 | if (np->gigabit == PHY_GIGABIT) { | 2182 | if (np->gigabit == PHY_GIGABIT) { |
1957 | control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 2183 | control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1958 | status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ); | 2184 | status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); |
1959 | 2185 | ||
1960 | if ((control_1000 & ADVERTISE_1000FULL) && | 2186 | if ((control_1000 & ADVERTISE_1000FULL) && |
1961 | (status_1000 & LPA_1000FULL)) { | 2187 | (status_1000 & LPA_1000FULL)) { |
@@ -1967,27 +2193,22 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1967 | } | 2193 | } |
1968 | } | 2194 | } |
1969 | 2195 | ||
1970 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | ||
1971 | lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); | ||
1972 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", | ||
1973 | dev->name, adv, lpa); | ||
1974 | |||
1975 | /* FIXME: handle parallel detection properly */ | 2196 | /* FIXME: handle parallel detection properly */ |
1976 | lpa = lpa & adv; | 2197 | adv_lpa = lpa & adv; |
1977 | if (lpa & LPA_100FULL) { | 2198 | if (adv_lpa & LPA_100FULL) { |
1978 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | 2199 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
1979 | newdup = 1; | 2200 | newdup = 1; |
1980 | } else if (lpa & LPA_100HALF) { | 2201 | } else if (adv_lpa & LPA_100HALF) { |
1981 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | 2202 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
1982 | newdup = 0; | 2203 | newdup = 0; |
1983 | } else if (lpa & LPA_10FULL) { | 2204 | } else if (adv_lpa & LPA_10FULL) { |
1984 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 2205 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1985 | newdup = 1; | 2206 | newdup = 1; |
1986 | } else if (lpa & LPA_10HALF) { | 2207 | } else if (adv_lpa & LPA_10HALF) { |
1987 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 2208 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1988 | newdup = 0; | 2209 | newdup = 0; |
1989 | } else { | 2210 | } else { |
1990 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa); | 2211 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); |
1991 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 2212 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1992 | newdup = 0; | 2213 | newdup = 0; |
1993 | } | 2214 | } |
@@ -2030,6 +2251,46 @@ set_speed: | |||
2030 | writel(np->linkspeed, base + NvRegLinkSpeed); | 2251 | writel(np->linkspeed, base + NvRegLinkSpeed); |
2031 | pci_push(base); | 2252 | pci_push(base); |
2032 | 2253 | ||
2254 | pause_flags = 0; | ||
2255 | /* setup pause frame */ | ||
2256 | if (np->duplex != 0) { | ||
2257 | if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { | ||
2258 | adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); | ||
2259 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | ||
2260 | |||
2261 | switch (adv_pause) { | ||
2262 | case (ADVERTISE_PAUSE_CAP): | ||
2263 | if (lpa_pause & LPA_PAUSE_CAP) { | ||
2264 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
2265 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | ||
2266 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
2267 | } | ||
2268 | break; | ||
2269 | case (ADVERTISE_PAUSE_ASYM): | ||
2270 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) | ||
2271 | { | ||
2272 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
2273 | } | ||
2274 | break; | ||
2275 | case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): | ||
2276 | if (lpa_pause & LPA_PAUSE_CAP) | ||
2277 | { | ||
2278 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
2279 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | ||
2280 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
2281 | } | ||
2282 | if (lpa_pause == LPA_PAUSE_ASYM) | ||
2283 | { | ||
2284 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
2285 | } | ||
2286 | break; | ||
2287 | } | ||
2288 | } else { | ||
2289 | pause_flags = np->pause_flags; | ||
2290 | } | ||
2291 | } | ||
2292 | nv_update_pause(dev, pause_flags); | ||
2293 | |||
2033 | return retval; | 2294 | return retval; |
2034 | } | 2295 | } |
2035 | 2296 | ||
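The switch above implements the standard symmetric/asymmetric pause resolution from autonegotiation: what gets enabled depends on how the local ADVERTISE_PAUSE_CAP/ASYM bits pair with the partner's LPA_PAUSE bits, further gated by whether tx pause was requested locally. A standalone restatement of the decision table (the bit values are the real MII ones from <linux/mii.h>):

#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400
#define ADVERTISE_PAUSE_ASYM 0x0800
#define LPA_PAUSE_CAP        0x0400
#define LPA_PAUSE_ASYM       0x0800

#define RX_EN 1
#define TX_EN 2

static int resolve_pause(int adv, int lpa, int tx_req)
{
	int adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	int lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
	int flags = 0;

	switch (adv_pause) {
	case ADVERTISE_PAUSE_CAP:
		if (lpa_pause & LPA_PAUSE_CAP) {
			flags |= RX_EN;
			if (tx_req)
				flags |= TX_EN;
		}
		break;
	case ADVERTISE_PAUSE_ASYM:
		if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
			flags |= TX_EN;
		break;
	case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
		if (lpa_pause & LPA_PAUSE_CAP) {
			flags |= RX_EN;
			if (tx_req)
				flags |= TX_EN;
		}
		if (lpa_pause == LPA_PAUSE_ASYM)
			flags |= RX_EN;
		break;
	}
	return flags;
}

int main(void)
{
	/* both sides symmetric-capable, tx requested: rx+tx pause */
	printf("%d\n", resolve_pause(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP, 1)); /* 3 */
	/* local side asym-only, partner cap+asym: tx pause only */
	printf("%d\n", resolve_pause(ADVERTISE_PAUSE_ASYM,
				     LPA_PAUSE_CAP | LPA_PAUSE_ASYM, 0)); /* 2 */
	return 0;
}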
@@ -2090,7 +2351,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2090 | spin_lock(&np->lock); | 2351 | spin_lock(&np->lock); |
2091 | nv_tx_done(dev); | 2352 | nv_tx_done(dev); |
2092 | spin_unlock(&np->lock); | 2353 | spin_unlock(&np->lock); |
2093 | 2354 | ||
2094 | nv_rx_process(dev); | 2355 | nv_rx_process(dev); |
2095 | if (nv_alloc_rx(dev)) { | 2356 | if (nv_alloc_rx(dev)) { |
2096 | spin_lock(&np->lock); | 2357 | spin_lock(&np->lock); |
@@ -2098,7 +2359,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2098 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2359 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2099 | spin_unlock(&np->lock); | 2360 | spin_unlock(&np->lock); |
2100 | } | 2361 | } |
2101 | 2362 | ||
2102 | if (events & NVREG_IRQ_LINK) { | 2363 | if (events & NVREG_IRQ_LINK) { |
2103 | spin_lock(&np->lock); | 2364 | spin_lock(&np->lock); |
2104 | nv_link_irq(dev); | 2365 | nv_link_irq(dev); |
@@ -2163,7 +2424,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | |||
2163 | spin_lock_irq(&np->lock); | 2424 | spin_lock_irq(&np->lock); |
2164 | nv_tx_done(dev); | 2425 | nv_tx_done(dev); |
2165 | spin_unlock_irq(&np->lock); | 2426 | spin_unlock_irq(&np->lock); |
2166 | 2427 | ||
2167 | if (events & (NVREG_IRQ_TX_ERR)) { | 2428 | if (events & (NVREG_IRQ_TX_ERR)) { |
2168 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | 2429 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
2169 | dev->name, events); | 2430 | dev->name, events); |
@@ -2206,7 +2467,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2206 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | 2467 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); |
2207 | if (!(events & np->irqmask)) | 2468 | if (!(events & np->irqmask)) |
2208 | break; | 2469 | break; |
2209 | 2470 | ||
2210 | nv_rx_process(dev); | 2471 | nv_rx_process(dev); |
2211 | if (nv_alloc_rx(dev)) { | 2472 | if (nv_alloc_rx(dev)) { |
2212 | spin_lock_irq(&np->lock); | 2473 | spin_lock_irq(&np->lock); |
@@ -2214,7 +2475,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2214 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2475 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2215 | spin_unlock_irq(&np->lock); | 2476 | spin_unlock_irq(&np->lock); |
2216 | } | 2477 | } |
2217 | 2478 | ||
2218 | if (i > max_interrupt_work) { | 2479 | if (i > max_interrupt_work) { |
2219 | spin_lock_irq(&np->lock); | 2480 | spin_lock_irq(&np->lock); |
2220 | /* disable interrupts on the nic */ | 2481 | /* disable interrupts on the nic */ |
@@ -2253,7 +2514,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2253 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 2514 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
2254 | if (!(events & np->irqmask)) | 2515 | if (!(events & np->irqmask)) |
2255 | break; | 2516 | break; |
2256 | 2517 | ||
2257 | if (events & NVREG_IRQ_LINK) { | 2518 | if (events & NVREG_IRQ_LINK) { |
2258 | spin_lock_irq(&np->lock); | 2519 | spin_lock_irq(&np->lock); |
2259 | nv_link_irq(dev); | 2520 | nv_link_irq(dev); |
@@ -2290,6 +2551,175 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2290 | return IRQ_RETVAL(i); | 2551 | return IRQ_RETVAL(i); |
2291 | } | 2552 | } |
2292 | 2553 | ||
2554 | static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs) | ||
2555 | { | ||
2556 | struct net_device *dev = (struct net_device *) data; | ||
2557 | struct fe_priv *np = netdev_priv(dev); | ||
2558 | u8 __iomem *base = get_hwbase(dev); | ||
2559 | u32 events; | ||
2560 | |||
2561 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); | ||
2562 | |||
2563 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | ||
2564 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2565 | writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); | ||
2566 | } else { | ||
2567 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2568 | writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); | ||
2569 | } | ||
2570 | pci_push(base); | ||
2571 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | ||
2572 | if (!(events & NVREG_IRQ_TIMER)) | ||
2573 | return IRQ_RETVAL(0); | ||
2574 | |||
2575 | spin_lock(&np->lock); | ||
2576 | np->intr_test = 1; | ||
2577 | spin_unlock(&np->lock); | ||
2578 | |||
2579 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); | ||
2580 | |||
2581 | return IRQ_RETVAL(1); | ||
2582 | } | ||
2583 | |||
2584 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) | ||
2585 | { | ||
2586 | u8 __iomem *base = get_hwbase(dev); | ||
2587 | int i; | ||
2588 | u32 msixmap = 0; | ||
2589 | |||
2590 | /* Each interrupt bit can be mapped to an MSIX vector (4 bits). ||
2591 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | ||
2592 | * the remaining 8 interrupts. | ||
2593 | */ | ||
2594 | for (i = 0; i < 8; i++) { | ||
2595 | if ((irqmask >> i) & 0x1) { | ||
2596 | msixmap |= vector << (i << 2); | ||
2597 | } | ||
2598 | } | ||
2599 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | ||
2600 | |||
2601 | msixmap = 0; | ||
2602 | for (i = 0; i < 8; i++) { | ||
2603 | if ((irqmask >> (i + 8)) & 0x1) { | ||
2604 | msixmap |= vector << (i << 2); | ||
2605 | } | ||
2606 | } | ||
2607 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | ||
2608 | } | ||
2609 | |||
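Each of the sixteen interrupt bits owns a 4-bit field across the two map registers, so a vector number is fanned out by shifting it into the nibble of every bit set in irqmask. A worked standalone example of the low-half computation:

#include <stdint.h>
#include <stdio.h>

static uint32_t low_half_map(uint32_t vector, uint32_t irqmask)
{
	uint32_t msixmap = 0;
	int i;

	for (i = 0; i < 8; i++)
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);	/* nibble i */
	return msixmap;
}

int main(void)
{
	/* hypothetical rx mask covering interrupt bits 0..2, vector 1:
	 * each set bit contributes a '1' nibble -> 0x00000111 */
	printf("0x%08x\n", low_half_map(0x1, 0x07));
	return 0;
}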
2610 | static int nv_request_irq(struct net_device *dev, int intr_test) | ||
2611 | { | ||
2612 | struct fe_priv *np = get_nvpriv(dev); | ||
2613 | u8 __iomem *base = get_hwbase(dev); | ||
2614 | int ret = 1; | ||
2615 | int i; | ||
2616 | |||
2617 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | ||
2618 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2619 | np->msi_x_entry[i].entry = i; | ||
2620 | } | ||
2621 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | ||
2622 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2623 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { | ||
2624 | /* Request irq for rx handling */ | ||
2625 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2626 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2627 | pci_disable_msix(np->pci_dev); | ||
2628 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2629 | goto out_err; | ||
2630 | } | ||
2631 | /* Request irq for tx handling */ | ||
2632 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2633 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2634 | pci_disable_msix(np->pci_dev); | ||
2635 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2636 | goto out_free_rx; | ||
2637 | } | ||
2638 | /* Request irq for link and timer handling */ | ||
2639 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2640 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2641 | pci_disable_msix(np->pci_dev); | ||
2642 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2643 | goto out_free_tx; | ||
2644 | } | ||
2645 | /* map interrupts to their respective vector */ | ||
2646 | writel(0, base + NvRegMSIXMap0); | ||
2647 | writel(0, base + NvRegMSIXMap1); | ||
2648 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2649 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2650 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2651 | } else { | ||
2652 | /* Request irq for all interrupts */ | ||
2653 | if ((!intr_test && | ||
2654 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || | ||
2655 | (intr_test && | ||
2656 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) { | ||
2657 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2658 | pci_disable_msix(np->pci_dev); | ||
2659 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2660 | goto out_err; | ||
2661 | } | ||
2662 | |||
2663 | /* map interrupts to vector 0 */ | ||
2664 | writel(0, base + NvRegMSIXMap0); | ||
2665 | writel(0, base + NvRegMSIXMap1); | ||
2666 | } | ||
2667 | } | ||
2668 | } | ||
2669 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | ||
2670 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | ||
2671 | np->msi_flags |= NV_MSI_ENABLED; | ||
2672 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || | ||
2673 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) { | ||
2674 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2675 | pci_disable_msi(np->pci_dev); | ||
2676 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2677 | goto out_err; | ||
2678 | } | ||
2679 | |||
2680 | /* map interrupts to vector 0 */ | ||
2681 | writel(0, base + NvRegMSIMap0); | ||
2682 | writel(0, base + NvRegMSIMap1); | ||
2683 | /* enable msi vector 0 */ | ||
2684 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
2685 | } | ||
2686 | } | ||
2687 | if (ret != 0) { | ||
2688 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || | ||
2689 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) | ||
2690 | goto out_err; | ||
2691 | |||
2692 | } | ||
2693 | |||
2694 | return 0; | ||
2695 | out_free_tx: | ||
2696 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | ||
2697 | out_free_rx: | ||
2698 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | ||
2699 | out_err: | ||
2700 | return 1; | ||
2701 | } | ||
2702 | |||
2703 | static void nv_free_irq(struct net_device *dev) | ||
2704 | { | ||
2705 | struct fe_priv *np = get_nvpriv(dev); | ||
2706 | int i; | ||
2707 | |||
2708 | if (np->msi_flags & NV_MSI_X_ENABLED) { | ||
2709 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2710 | free_irq(np->msi_x_entry[i].vector, dev); | ||
2711 | } | ||
2712 | pci_disable_msix(np->pci_dev); | ||
2713 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2714 | } else { | ||
2715 | free_irq(np->pci_dev->irq, dev); | ||
2716 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
2717 | pci_disable_msi(np->pci_dev); | ||
2718 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2719 | } | ||
2720 | } | ||
2721 | } | ||
2722 | |||
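nv_request_irq tries MSI-X first, falls back to MSI, then to the legacy interrupt line, and unwinds with the kernel's usual goto ladder on partial failure: each label frees exactly what was acquired before the failing step. The shape in isolation:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* goto ladder: a failure at step N jumps to the label that releases
 * steps N-1..1, mirroring out_free_tx/out_free_rx/out_err above */
static int request_irqs(void)
{
	if (acquire("rx irq"))
		goto out_err;
	if (acquire("tx irq"))
		goto out_free_rx;
	if (acquire("other irq"))
		goto out_free_tx;
	return 0;

out_free_tx:
	release("tx irq");
out_free_rx:
	release("rx irq");
out_err:
	return 1;
}

int main(void) { return request_irqs(); }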
2293 | static void nv_do_nic_poll(unsigned long data) | 2723 | static void nv_do_nic_poll(unsigned long data) |
2294 | { | 2724 | { |
2295 | struct net_device *dev = (struct net_device *) data; | 2725 | struct net_device *dev = (struct net_device *) data; |
@@ -2326,7 +2756,7 @@ static void nv_do_nic_poll(unsigned long data) | |||
2326 | np->nic_poll_irq = 0; | 2756 | np->nic_poll_irq = 0; |
2327 | 2757 | ||
2328 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | 2758 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ |
2329 | 2759 | ||
2330 | writel(mask, base + NvRegIrqMask); | 2760 | writel(mask, base + NvRegIrqMask); |
2331 | pci_push(base); | 2761 | pci_push(base); |
2332 | 2762 | ||
@@ -2359,6 +2789,56 @@ static void nv_poll_controller(struct net_device *dev) | |||
2359 | } | 2789 | } |
2360 | #endif | 2790 | #endif |
2361 | 2791 | ||
2792 | static void nv_do_stats_poll(unsigned long data) | ||
2793 | { | ||
2794 | struct net_device *dev = (struct net_device *) data; | ||
2795 | struct fe_priv *np = netdev_priv(dev); | ||
2796 | u8 __iomem *base = get_hwbase(dev); | ||
2797 | |||
2798 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | ||
2799 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | ||
2800 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | ||
2801 | np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); | ||
2802 | np->estats.tx_late_collision += readl(base + NvRegTxLateCol); | ||
2803 | np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); | ||
2804 | np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); | ||
2805 | np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); | ||
2806 | np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); | ||
2807 | np->estats.tx_deferral += readl(base + NvRegTxDef); | ||
2808 | np->estats.tx_packets += readl(base + NvRegTxFrame); | ||
2809 | np->estats.tx_pause += readl(base + NvRegTxPause); | ||
2810 | np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); | ||
2811 | np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); | ||
2812 | np->estats.rx_late_collision += readl(base + NvRegRxLateCol); | ||
2813 | np->estats.rx_runt += readl(base + NvRegRxRunt); | ||
2814 | np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); | ||
2815 | np->estats.rx_over_errors += readl(base + NvRegRxOverflow); | ||
2816 | np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); | ||
2817 | np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); | ||
2818 | np->estats.rx_length_error += readl(base + NvRegRxLenErr); | ||
2819 | np->estats.rx_unicast += readl(base + NvRegRxUnicast); | ||
2820 | np->estats.rx_multicast += readl(base + NvRegRxMulticast); | ||
2821 | np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); | ||
2822 | np->estats.rx_bytes += readl(base + NvRegRxCnt); | ||
2823 | np->estats.rx_pause += readl(base + NvRegRxPause); | ||
2824 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | ||
2825 | np->estats.rx_packets = | ||
2826 | np->estats.rx_unicast + | ||
2827 | np->estats.rx_multicast + | ||
2828 | np->estats.rx_broadcast; | ||
2829 | np->estats.rx_errors_total = | ||
2830 | np->estats.rx_crc_errors + | ||
2831 | np->estats.rx_over_errors + | ||
2832 | np->estats.rx_frame_error + | ||
2833 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + | ||
2834 | np->estats.rx_late_collision + | ||
2835 | np->estats.rx_runt + | ||
2836 | np->estats.rx_frame_too_long; | ||
2837 | |||
2838 | if (!np->in_shutdown) | ||
2839 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | ||
2840 | } | ||
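/* The hardware counters read above are assumed to be clear-on-read,
 * so each poll accumulates the delta since the previous pass.
 * rx_packets and rx_errors_total have no dedicated counter and are
 * derived in software; userspace reads the lot via "ethtool -S ethX". */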
2841 | |||
2362 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 2842 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2363 | { | 2843 | { |
2364 | struct fe_priv *np = netdev_priv(dev); | 2844 | struct fe_priv *np = netdev_priv(dev); |
@@ -2382,17 +2862,19 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |||
2382 | { | 2862 | { |
2383 | struct fe_priv *np = netdev_priv(dev); | 2863 | struct fe_priv *np = netdev_priv(dev); |
2384 | u8 __iomem *base = get_hwbase(dev); | 2864 | u8 __iomem *base = get_hwbase(dev); |
2865 | u32 flags = 0; | ||
2385 | 2866 | ||
2386 | spin_lock_irq(&np->lock); | ||
2387 | if (wolinfo->wolopts == 0) { | 2867 | if (wolinfo->wolopts == 0) { |
2388 | writel(0, base + NvRegWakeUpFlags); | ||
2389 | np->wolenabled = 0; | 2868 | np->wolenabled = 0; |
2390 | } | 2869 | } else if (wolinfo->wolopts & WAKE_MAGIC) { |
2391 | if (wolinfo->wolopts & WAKE_MAGIC) { | ||
2392 | writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); | ||
2393 | np->wolenabled = 1; | 2870 | np->wolenabled = 1; |
2871 | flags = NVREG_WAKEUPFLAGS_ENABLE; | ||
2872 | } | ||
2873 | if (netif_running(dev)) { | ||
2874 | spin_lock_irq(&np->lock); | ||
2875 | writel(flags, base + NvRegWakeUpFlags); | ||
2876 | spin_unlock_irq(&np->lock); | ||
2394 | } | 2877 | } |
2395 | spin_unlock_irq(&np->lock); | ||
2396 | return 0; | 2878 | return 0; |
2397 | } | 2879 | } |
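/* When the interface is down, only np->wolenabled is recorded here;
 * nv_open() later writes the saved flag to NvRegWakeUpFlags once the
 * register can safely be touched. */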
2398 | 2880 | ||
@@ -2406,9 +2888,17 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2406 | if (!netif_running(dev)) { | 2888 | if (!netif_running(dev)) { |
2407 | /* We do not track link speed / duplex setting if the | 2889 | /* We do not track link speed / duplex setting if the |
2408 | * interface is disabled. Force a link check */ | 2890 | * interface is disabled. Force a link check */ |
2409 | nv_update_linkspeed(dev); | 2891 | if (nv_update_linkspeed(dev)) { |
2892 | if (!netif_carrier_ok(dev)) | ||
2893 | netif_carrier_on(dev); | ||
2894 | } else { | ||
2895 | if (netif_carrier_ok(dev)) | ||
2896 | netif_carrier_off(dev); | ||
2897 | } | ||
2410 | } | 2898 | } |
2411 | switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { | 2899 | |
2900 | if (netif_carrier_ok(dev)) { | ||
2901 | switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { | ||
2412 | case NVREG_LINKSPEED_10: | 2902 | case NVREG_LINKSPEED_10: |
2413 | ecmd->speed = SPEED_10; | 2903 | ecmd->speed = SPEED_10; |
2414 | break; | 2904 | break; |
@@ -2418,10 +2908,14 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2418 | case NVREG_LINKSPEED_1000: | 2908 | case NVREG_LINKSPEED_1000: |
2419 | ecmd->speed = SPEED_1000; | 2909 | ecmd->speed = SPEED_1000; |
2420 | break; | 2910 | break; |
2911 | } | ||
2912 | ecmd->duplex = DUPLEX_HALF; | ||
2913 | if (np->duplex) | ||
2914 | ecmd->duplex = DUPLEX_FULL; | ||
2915 | } else { | ||
2916 | ecmd->speed = -1; | ||
2917 | ecmd->duplex = -1; | ||
2421 | } | 2918 | } |
2422 | ecmd->duplex = DUPLEX_HALF; | ||
2423 | if (np->duplex) | ||
2424 | ecmd->duplex = DUPLEX_FULL; | ||
2425 | 2919 | ||
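/* With no carrier, speed and duplex are reported as -1 (unknown),
 * which is what ethtool shows for a downed link. */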
2426 | ecmd->autoneg = np->autoneg; | 2920 | ecmd->autoneg = np->autoneg; |
2427 | 2921 | ||
@@ -2429,23 +2923,20 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2429 | if (np->autoneg) { | 2923 | if (np->autoneg) { |
2430 | ecmd->advertising |= ADVERTISED_Autoneg; | 2924 | ecmd->advertising |= ADVERTISED_Autoneg; |
2431 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 2925 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2432 | } else { | 2926 | if (adv & ADVERTISE_10HALF) |
2433 | adv = np->fixed_mode; | 2927 | ecmd->advertising |= ADVERTISED_10baseT_Half; |
2434 | } | 2928 | if (adv & ADVERTISE_10FULL) |
2435 | if (adv & ADVERTISE_10HALF) | 2929 | ecmd->advertising |= ADVERTISED_10baseT_Full; |
2436 | ecmd->advertising |= ADVERTISED_10baseT_Half; | 2930 | if (adv & ADVERTISE_100HALF) |
2437 | if (adv & ADVERTISE_10FULL) | 2931 | ecmd->advertising |= ADVERTISED_100baseT_Half; |
2438 | ecmd->advertising |= ADVERTISED_10baseT_Full; | 2932 | if (adv & ADVERTISE_100FULL) |
2439 | if (adv & ADVERTISE_100HALF) | 2933 | ecmd->advertising |= ADVERTISED_100baseT_Full; |
2440 | ecmd->advertising |= ADVERTISED_100baseT_Half; | 2934 | if (np->gigabit == PHY_GIGABIT) { |
2441 | if (adv & ADVERTISE_100FULL) | 2935 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2442 | ecmd->advertising |= ADVERTISED_100baseT_Full; | 2936 | if (adv & ADVERTISE_1000FULL) |
2443 | if (np->autoneg && np->gigabit == PHY_GIGABIT) { | 2937 | ecmd->advertising |= ADVERTISED_1000baseT_Full; |
2444 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 2938 | } |
2445 | if (adv & ADVERTISE_1000FULL) | ||
2446 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
2447 | } | 2939 | } |
2448 | |||
2449 | ecmd->supported = (SUPPORTED_Autoneg | | 2940 | ecmd->supported = (SUPPORTED_Autoneg | |
2450 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | 2941 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | |
2451 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | 2942 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | |
@@ -2497,7 +2988,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2497 | return -EINVAL; | 2988 | return -EINVAL; |
2498 | } | 2989 | } |
2499 | 2990 | ||
2500 | spin_lock_irq(&np->lock); | 2991 | netif_carrier_off(dev); |
2992 | if (netif_running(dev)) { | ||
2993 | nv_disable_irq(dev); | ||
2994 | spin_lock_bh(&dev->xmit_lock); | ||
2995 | spin_lock(&np->lock); | ||
2996 | /* stop engines */ | ||
2997 | nv_stop_rx(dev); | ||
2998 | nv_stop_tx(dev); | ||
2999 | spin_unlock(&np->lock); | ||
3000 | spin_unlock_bh(&dev->xmit_lock); | ||
3001 | } | ||
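/* Both xmit_lock and np->lock are held while the engines stop, so no
 * packet is in flight while the PHY speed/duplex/pause setup changes. */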
3002 | |||
2501 | if (ecmd->autoneg == AUTONEG_ENABLE) { | 3003 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
2502 | int adv, bmcr; | 3004 | int adv, bmcr; |
2503 | 3005 | ||
@@ -2505,7 +3007,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2505 | 3007 | ||
2506 | /* advertise only what has been requested */ | 3008 | /* advertise only what has been requested */ |
2507 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 3009 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2508 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | 3010 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2509 | if (ecmd->advertising & ADVERTISED_10baseT_Half) | 3011 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
2510 | adv |= ADVERTISE_10HALF; | 3012 | adv |= ADVERTISE_10HALF; |
2511 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | 3013 | if (ecmd->advertising & ADVERTISED_10baseT_Full) |
@@ -2514,16 +3016,22 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2514 | adv |= ADVERTISE_100HALF; | 3016 | adv |= ADVERTISE_100HALF; |
2515 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | 3017 | if (ecmd->advertising & ADVERTISED_100baseT_Full) |
2516 | adv |= ADVERTISE_100FULL; | 3018 | adv |= ADVERTISE_100FULL; |
3019 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ | ||
3020 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
3021 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | ||
3022 | adv |= ADVERTISE_PAUSE_ASYM; | ||
2517 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | 3023 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2518 | 3024 | ||
2519 | if (np->gigabit == PHY_GIGABIT) { | 3025 | if (np->gigabit == PHY_GIGABIT) { |
2520 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 3026 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2521 | adv &= ~ADVERTISE_1000FULL; | 3027 | adv &= ~ADVERTISE_1000FULL; |
2522 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | 3028 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) |
2523 | adv |= ADVERTISE_1000FULL; | 3029 | adv |= ADVERTISE_1000FULL; |
2524 | mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); | 3030 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
2525 | } | 3031 | } |
2526 | 3032 | ||
3033 | if (netif_running(dev)) | ||
3034 | printk(KERN_INFO "%s: link down.\n", dev->name); | ||
2527 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 3035 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
2528 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | 3036 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
2529 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | 3037 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); |
@@ -2534,7 +3042,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2534 | np->autoneg = 0; | 3042 | np->autoneg = 0; |
2535 | 3043 | ||
2536 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 3044 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2537 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | 3045 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2538 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) | 3046 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) |
2539 | adv |= ADVERTISE_10HALF; | 3047 | adv |= ADVERTISE_10HALF; |
2540 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) | 3048 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) |
@@ -2543,30 +3051,49 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2543 | adv |= ADVERTISE_100HALF; | 3051 | adv |= ADVERTISE_100HALF; |
2544 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) | 3052 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) |
2545 | adv |= ADVERTISE_100FULL; | 3053 | adv |= ADVERTISE_100FULL; |
3054 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | ||
3055 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */ | ||
3056 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
3057 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
3058 | } | ||
3059 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { | ||
3060 | adv |= ADVERTISE_PAUSE_ASYM; | ||
3061 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
3062 | } | ||
2546 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | 3063 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2547 | np->fixed_mode = adv; | 3064 | np->fixed_mode = adv; |
2548 | 3065 | ||
2549 | if (np->gigabit == PHY_GIGABIT) { | 3066 | if (np->gigabit == PHY_GIGABIT) { |
2550 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 3067 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2551 | adv &= ~ADVERTISE_1000FULL; | 3068 | adv &= ~ADVERTISE_1000FULL; |
2552 | mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); | 3069 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
2553 | } | 3070 | } |
2554 | 3071 | ||
2555 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 3072 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
2556 | bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX); | 3073 | bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); |
2557 | if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL)) | 3074 | if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) |
2558 | bmcr |= BMCR_FULLDPLX; | 3075 | bmcr |= BMCR_FULLDPLX; |
2559 | if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL)) | 3076 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) |
2560 | bmcr |= BMCR_SPEED100; | 3077 | bmcr |= BMCR_SPEED100; |
2561 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | 3078 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); |
2562 | 3079 | if (np->phy_oui == PHY_OUI_MARVELL) { | |
2563 | if (netif_running(dev)) { | 3080 | /* reset the phy */ |
3081 | if (phy_reset(dev)) { | ||
3082 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | ||
3083 | return -EINVAL; | ||
3084 | } | ||
3085 | } else if (netif_running(dev)) { | ||
2564 | /* Wait a bit and then reconfigure the nic. */ | 3086 | /* Wait a bit and then reconfigure the nic. */ |
2565 | udelay(10); | 3087 | udelay(10); |
2566 | nv_linkchange(dev); | 3088 | nv_linkchange(dev); |
2567 | } | 3089 | } |
2568 | } | 3090 | } |
2569 | spin_unlock_irq(&np->lock); | 3091 | |
3092 | if (netif_running(dev)) { | ||
3093 | nv_start_rx(dev); | ||
3094 | nv_start_tx(dev); | ||
3095 | nv_enable_irq(dev); | ||
3096 | } | ||
2570 | 3097 | ||
2571 | return 0; | 3098 | return 0; |
2572 | } | 3099 | } |
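/* Reached from userspace via e.g. "ethtool -s ethX speed 100 duplex
 * full autoneg off"; the rx/tx engines are restarted above only if
 * the interface was already up. */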
@@ -2598,24 +3125,39 @@ static int nv_nway_reset(struct net_device *dev) | |||
2598 | struct fe_priv *np = netdev_priv(dev); | 3125 | struct fe_priv *np = netdev_priv(dev); |
2599 | int ret; | 3126 | int ret; |
2600 | 3127 | ||
2601 | spin_lock_irq(&np->lock); | ||
2602 | if (np->autoneg) { | 3128 | if (np->autoneg) { |
2603 | int bmcr; | 3129 | int bmcr; |
2604 | 3130 | ||
3131 | netif_carrier_off(dev); | ||
3132 | if (netif_running(dev)) { | ||
3133 | nv_disable_irq(dev); | ||
3134 | spin_lock_bh(&dev->xmit_lock); | ||
3135 | spin_lock(&np->lock); | ||
3136 | /* stop engines */ | ||
3137 | nv_stop_rx(dev); | ||
3138 | nv_stop_tx(dev); | ||
3139 | spin_unlock(&np->lock); | ||
3140 | spin_unlock_bh(&dev->xmit_lock); | ||
3141 | printk(KERN_INFO "%s: link down.\n", dev->name); | ||
3142 | } | ||
3143 | |||
2605 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 3144 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
2606 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | 3145 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
2607 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | 3146 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); |
2608 | 3147 | ||
3148 | if (netif_running(dev)) { | ||
3149 | nv_start_rx(dev); | ||
3150 | nv_start_tx(dev); | ||
3151 | nv_enable_irq(dev); | ||
3152 | } | ||
2609 | ret = 0; | 3153 | ret = 0; |
2610 | } else { | 3154 | } else { |
2611 | ret = -EINVAL; | 3155 | ret = -EINVAL; |
2612 | } | 3156 | } |
2613 | spin_unlock_irq(&np->lock); | ||
2614 | 3157 | ||
2615 | return ret; | 3158 | return ret; |
2616 | } | 3159 | } |
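/* "ethtool -r ethX" lands here; it fails with -EINVAL unless
 * autonegotiation is enabled. */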
2617 | 3160 | ||
2618 | #ifdef NETIF_F_TSO | ||
2619 | static int nv_set_tso(struct net_device *dev, u32 value) | 3161 | static int nv_set_tso(struct net_device *dev, u32 value) |
2620 | { | 3162 | { |
2621 | struct fe_priv *np = netdev_priv(dev); | 3163 | struct fe_priv *np = netdev_priv(dev); |
@@ -2623,187 +3165,702 @@ static int nv_set_tso(struct net_device *dev, u32 value) | |||
2623 | if ((np->driver_data & DEV_HAS_CHECKSUM)) | 3165 | if ((np->driver_data & DEV_HAS_CHECKSUM)) |
2624 | return ethtool_op_set_tso(dev, value); | 3166 | return ethtool_op_set_tso(dev, value); |
2625 | else | 3167 | else |
2626 | return value ? -EOPNOTSUPP : 0; | 3168 | return -EOPNOTSUPP; |
2627 | } | 3169 | } |
2628 | #endif | ||
2629 | 3170 | ||
2630 | static struct ethtool_ops ops = { | 3171 | static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) |
2631 | .get_drvinfo = nv_get_drvinfo, | 3172 | { |
2632 | .get_link = ethtool_op_get_link, | 3173 | struct fe_priv *np = netdev_priv(dev); |
2633 | .get_wol = nv_get_wol, | ||
2634 | .set_wol = nv_set_wol, | ||
2635 | .get_settings = nv_get_settings, | ||
2636 | .set_settings = nv_set_settings, | ||
2637 | .get_regs_len = nv_get_regs_len, | ||
2638 | .get_regs = nv_get_regs, | ||
2639 | .nway_reset = nv_nway_reset, | ||
2640 | .get_perm_addr = ethtool_op_get_perm_addr, | ||
2641 | #ifdef NETIF_F_TSO | ||
2642 | .get_tso = ethtool_op_get_tso, | ||
2643 | .set_tso = nv_set_tso | ||
2644 | #endif | ||
2645 | }; | ||
2646 | 3174 | ||
2647 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 3175 | ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; |
3176 | ring->rx_mini_max_pending = 0; | ||
3177 | ring->rx_jumbo_max_pending = 0; | ||
3178 | ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | ||
3179 | |||
3180 | ring->rx_pending = np->rx_ring_size; | ||
3181 | ring->rx_mini_pending = 0; | ||
3182 | ring->rx_jumbo_pending = 0; | ||
3183 | ring->tx_pending = np->tx_ring_size; | ||
3184 | } | ||
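/* Reported via "ethtool -g ethX"; the maximum ring size differs
 * between the v1 and the v2/v3 descriptor formats
 * (RING_MAX_DESC_VER_1 vs RING_MAX_DESC_VER_2_3). */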
3185 | |||
3186 | static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) | ||
2648 | { | 3187 | { |
2649 | struct fe_priv *np = get_nvpriv(dev); | 3188 | struct fe_priv *np = netdev_priv(dev); |
3189 | u8 __iomem *base = get_hwbase(dev); | ||
3190 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; | ||
3191 | dma_addr_t ring_addr; | ||
2650 | 3192 | ||
2651 | spin_lock_irq(&np->lock); | 3193 | if (ring->rx_pending < RX_RING_MIN || |
3194 | ring->tx_pending < TX_RING_MIN || | ||
3195 | ring->rx_mini_pending != 0 || | ||
3196 | ring->rx_jumbo_pending != 0 || | ||
3197 | (np->desc_ver == DESC_VER_1 && | ||
3198 | (ring->rx_pending > RING_MAX_DESC_VER_1 || | ||
3199 | ring->tx_pending > RING_MAX_DESC_VER_1)) || | ||
3200 | (np->desc_ver != DESC_VER_1 && | ||
3201 | (ring->rx_pending > RING_MAX_DESC_VER_2_3 || | ||
3202 | ring->tx_pending > RING_MAX_DESC_VER_2_3))) { | ||
3203 | return -EINVAL; | ||
3204 | } | ||
2652 | 3205 | ||
2653 | /* save vlan group */ | 3206 | /* allocate new rings */ |
2654 | np->vlangrp = grp; | 3207 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3208 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | ||
3209 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | ||
3210 | &ring_addr); | ||
3211 | } else { | ||
3212 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | ||
3213 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | ||
3214 | &ring_addr); | ||
3215 | } | ||
3216 | rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); | ||
3217 | rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); | ||
3218 | tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); | ||
3219 | tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); | ||
3220 | tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); | ||
3221 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { | ||
3222 | /* fall back to old rings */ | ||
3223 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
3224 | if (rxtx_ring) | ||
3225 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | ||
3226 | rxtx_ring, ring_addr); | ||
3227 | } else { | ||
3228 | if (rxtx_ring) | ||
3229 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | ||
3230 | rxtx_ring, ring_addr); | ||
3231 | } | ||
3232 | if (rx_skbuff) | ||
3233 | kfree(rx_skbuff); | ||
3234 | if (rx_dma) | ||
3235 | kfree(rx_dma); | ||
3236 | if (tx_skbuff) | ||
3237 | kfree(tx_skbuff); | ||
3238 | if (tx_dma) | ||
3239 | kfree(tx_dma); | ||
3240 | if (tx_dma_len) | ||
3241 | kfree(tx_dma_len); | ||
3242 | goto exit; | ||
3243 | } | ||
2655 | 3244 | ||
2656 | if (grp) { | 3245 | if (netif_running(dev)) { |
2657 | /* enable vlan on MAC */ | 3246 | nv_disable_irq(dev); |
2658 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | 3247 | spin_lock_bh(&dev->xmit_lock); |
3248 | spin_lock(&np->lock); | ||
3249 | /* stop engines */ | ||
3250 | nv_stop_rx(dev); | ||
3251 | nv_stop_tx(dev); | ||
3252 | nv_txrx_reset(dev); | ||
3253 | /* drain queues */ | ||
3254 | nv_drain_rx(dev); | ||
3255 | nv_drain_tx(dev); | ||
3256 | /* delete queues */ | ||
3257 | free_rings(dev); | ||
3258 | } | ||
3259 | |||
3260 | /* set new values */ | ||
3261 | np->rx_ring_size = ring->rx_pending; | ||
3262 | np->tx_ring_size = ring->tx_pending; | ||
3263 | np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; | ||
3264 | np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; | ||
3265 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
3266 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | ||
3267 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | ||
2659 | } else { | 3268 | } else { |
2660 | /* disable vlan on MAC */ | 3269 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; |
2661 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | 3270 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
2662 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | ||
2663 | } | 3271 | } |
3272 | np->rx_skbuff = (struct sk_buff**)rx_skbuff; | ||
3273 | np->rx_dma = (dma_addr_t*)rx_dma; | ||
3274 | np->tx_skbuff = (struct sk_buff**)tx_skbuff; | ||
3275 | np->tx_dma = (dma_addr_t*)tx_dma; | ||
3276 | np->tx_dma_len = (unsigned int*)tx_dma_len; | ||
3277 | np->ring_addr = ring_addr; | ||
3278 | |||
3279 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | ||
3280 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | ||
3281 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | ||
3282 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | ||
3283 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | ||
2664 | 3284 | ||
2665 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | 3285 | if (netif_running(dev)) { |
3286 | /* reinit driver view of the queues */ | ||
3287 | set_bufsize(dev); | ||
3288 | if (nv_init_ring(dev)) { | ||
3289 | if (!np->in_shutdown) | ||
3290 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
3291 | } | ||
2666 | 3292 | ||
2667 | spin_unlock_irq(&np->lock); | 3293 | /* reinit nic view of the queues */ |
2668 | }; | 3294 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
3295 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | ||
3296 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | ||
3297 | base + NvRegRingSizes); | ||
3298 | pci_push(base); | ||
3299 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
3300 | pci_push(base); | ||
2669 | 3301 | ||
2670 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | 3302 | /* restart engines */ |
3303 | nv_start_rx(dev); | ||
3304 | nv_start_tx(dev); | ||
3305 | spin_unlock(&np->lock); | ||
3306 | spin_unlock_bh(&dev->xmit_lock); | ||
3307 | nv_enable_irq(dev); | ||
3308 | } | ||
3309 | return 0; | ||
3310 | exit: | ||
3311 | return -ENOMEM; | ||
3312 | } | ||
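/* Changed via e.g. "ethtool -G ethX rx 512 tx 512". The new rings are
 * allocated before the old ones are torn down, so an allocation
 * failure (-ENOMEM) leaves the running configuration untouched. */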
3313 | |||
3314 | static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) | ||
2671 | { | 3315 | { |
2672 | /* nothing to do */ | 3316 | struct fe_priv *np = netdev_priv(dev); |
2673 | }; | ||
2674 | 3317 | ||
2675 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) | 3318 | pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; |
3319 | pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; | ||
3320 | pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; | ||
3321 | } | ||
3322 | |||
3323 | static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) | ||
3324 | { | ||
3325 | struct fe_priv *np = netdev_priv(dev); | ||
3326 | int adv, bmcr; | ||
3327 | |||
3328 | if ((!np->autoneg && np->duplex == 0) || | ||
3329 | (np->autoneg && !pause->autoneg && np->duplex == 0)) { | ||
3330 | printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n", | ||
3331 | dev->name); | ||
3332 | return -EINVAL; | ||
3333 | } | ||
3334 | if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { | ||
3335 | printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); | ||
3336 | return -EINVAL; | ||
3337 | } | ||
3338 | |||
3339 | netif_carrier_off(dev); | ||
3340 | if (netif_running(dev)) { | ||
3341 | nv_disable_irq(dev); | ||
3342 | spin_lock_bh(&dev->xmit_lock); | ||
3343 | spin_lock(&np->lock); | ||
3344 | /* stop engines */ | ||
3345 | nv_stop_rx(dev); | ||
3346 | nv_stop_tx(dev); | ||
3347 | spin_unlock(&np->lock); | ||
3348 | spin_unlock_bh(&dev->xmit_lock); | ||
3349 | } | ||
3350 | |||
3351 | np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); | ||
3352 | if (pause->rx_pause) | ||
3353 | np->pause_flags |= NV_PAUSEFRAME_RX_REQ; | ||
3354 | if (pause->tx_pause) | ||
3355 | np->pause_flags |= NV_PAUSEFRAME_TX_REQ; | ||
3356 | |||
3357 | if (np->autoneg && pause->autoneg) { | ||
3358 | np->pause_flags |= NV_PAUSEFRAME_AUTONEG; | ||
3359 | |||
3360 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | ||
3361 | adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | ||
3362 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ | ||
3363 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
3364 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | ||
3365 | adv |= ADVERTISE_PAUSE_ASYM; | ||
3366 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | ||
3367 | |||
3368 | if (netif_running(dev)) | ||
3369 | printk(KERN_INFO "%s: link down.\n", dev->name); | ||
3370 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | ||
3371 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
3372 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | ||
3373 | } else { | ||
3374 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | ||
3375 | if (pause->rx_pause) | ||
3376 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
3377 | if (pause->tx_pause) | ||
3378 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
3379 | |||
3380 | if (!netif_running(dev)) | ||
3381 | nv_update_linkspeed(dev); | ||
3382 | else | ||
3383 | nv_update_pause(dev, np->pause_flags); | ||
3384 | } | ||
3385 | |||
3386 | if (netif_running(dev)) { | ||
3387 | nv_start_rx(dev); | ||
3388 | nv_start_tx(dev); | ||
3389 | nv_enable_irq(dev); | ||
3390 | } | ||
3391 | return 0; | ||
3392 | } | ||
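/* Pause bookkeeping in short: the _REQ flags record what was asked
 * for, the _ENABLE flags what is actually in effect. Under autoneg
 * the outcome also depends on the link partner's advertisement; in
 * forced mode nv_update_pause() programs the MAC directly. Set via
 * "ethtool -A ethX rx on tx on". */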
3393 | |||
3394 | static u32 nv_get_rx_csum(struct net_device *dev) | ||
3395 | { | ||
3396 | struct fe_priv *np = netdev_priv(dev); | ||
3397 | return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; | ||
3398 | } | ||
3399 | |||
3400 | static int nv_set_rx_csum(struct net_device *dev, u32 data) | ||
2676 | { | 3401 | { |
3402 | struct fe_priv *np = netdev_priv(dev); | ||
2677 | u8 __iomem *base = get_hwbase(dev); | 3403 | u8 __iomem *base = get_hwbase(dev); |
2678 | int i; | 3404 | int retcode = 0; |
2679 | u32 msixmap = 0; | ||
2680 | 3405 | ||
2681 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). | 3406 | if (np->driver_data & DEV_HAS_CHECKSUM) { |
2682 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | 3407 | |
2683 | * the remaining 8 interrupts. | 3408 | if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) || |
2684 | */ | 3409 | (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) { |
2685 | for (i = 0; i < 8; i++) { | 3410 | /* already set or unset */ |
2686 | if ((irqmask >> i) & 0x1) { | 3411 | return 0; |
2687 | msixmap |= vector << (i << 2); | ||
2688 | } | 3412 | } |
2689 | } | ||
2690 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | ||
2691 | 3413 | ||
2692 | msixmap = 0; | 3414 | if (data) { |
2693 | for (i = 0; i < 8; i++) { | 3415 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
2694 | if ((irqmask >> (i + 8)) & 0x1) { | 3416 | } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) { |
2695 | msixmap |= vector << (i << 2); | 3417 | np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; |
3418 | } else { | ||
3419 | printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n"); | ||
3420 | return -EINVAL; | ||
2696 | } | 3421 | } |
3422 | |||
3423 | if (netif_running(dev)) { | ||
3424 | spin_lock_irq(&np->lock); | ||
3425 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | ||
3426 | spin_unlock_irq(&np->lock); | ||
3427 | } | ||
3428 | } else { | ||
3429 | return -EINVAL; | ||
2697 | } | 3430 | } |
2698 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | 3431 | |
3432 | return retcode; | ||
2699 | } | 3433 | } |
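/* Rx checksum offload cannot be turned off while vlan acceleration is
 * active (the -EINVAL path above), presumably because the same
 * hardware path serves both; "ethtool -K ethX rx off" fails in that
 * case. */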
2700 | 3434 | ||
2701 | static int nv_request_irq(struct net_device *dev) | 3435 | static int nv_set_tx_csum(struct net_device *dev, u32 data) |
2702 | { | 3436 | { |
2703 | struct fe_priv *np = get_nvpriv(dev); | 3437 | struct fe_priv *np = netdev_priv(dev); |
3438 | |||
3439 | if (np->driver_data & DEV_HAS_CHECKSUM) | ||
3440 | return ethtool_op_set_tx_hw_csum(dev, data); | ||
3441 | else | ||
3442 | return -EOPNOTSUPP; | ||
3443 | } | ||
3444 | |||
3445 | static int nv_set_sg(struct net_device *dev, u32 data) | ||
3446 | { | ||
3447 | struct fe_priv *np = netdev_priv(dev); | ||
3448 | |||
3449 | if (np->driver_data & DEV_HAS_CHECKSUM) | ||
3450 | return ethtool_op_set_sg(dev, data); | ||
3451 | else | ||
3452 | return -EOPNOTSUPP; | ||
3453 | } | ||
3454 | |||
3455 | static int nv_get_stats_count(struct net_device *dev) | ||
3456 | { | ||
3457 | struct fe_priv *np = netdev_priv(dev); | ||
3458 | |||
3459 | if (np->driver_data & DEV_HAS_STATISTICS) | ||
3460 | return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); | ||
3461 | else | ||
3462 | return 0; | ||
3463 | } | ||
3464 | |||
3465 | static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) | ||
3466 | { | ||
3467 | struct fe_priv *np = netdev_priv(dev); | ||
3468 | |||
3469 | /* update stats */ | ||
3470 | nv_do_stats_poll((unsigned long)dev); | ||
3471 | |||
3472 | memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); | ||
3473 | } | ||
3474 | |||
3475 | static int nv_self_test_count(struct net_device *dev) | ||
3476 | { | ||
3477 | struct fe_priv *np = netdev_priv(dev); | ||
3478 | |||
3479 | if (np->driver_data & DEV_HAS_TEST_EXTENDED) | ||
3480 | return NV_TEST_COUNT_EXTENDED; | ||
3481 | else | ||
3482 | return NV_TEST_COUNT_BASE; | ||
3483 | } | ||
3484 | |||
3485 | static int nv_link_test(struct net_device *dev) | ||
3486 | { | ||
3487 | struct fe_priv *np = netdev_priv(dev); | ||
3488 | int mii_status; | ||
3489 | |||
3490 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | ||
3491 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | ||
3492 | |||
3493 | /* check phy link status */ | ||
3494 | if (!(mii_status & BMSR_LSTATUS)) | ||
3495 | return 0; | ||
3496 | else | ||
3497 | return 1; | ||
3498 | } | ||
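/* BMSR is read twice because the link-status bit is latched low: the
 * first read clears a stale indication, the second reflects the
 * current state. */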
3499 | |||
3500 | static int nv_register_test(struct net_device *dev) | ||
3501 | { | ||
3502 | u8 __iomem *base = get_hwbase(dev); | ||
3503 | int i = 0; | ||
3504 | u32 orig_read, new_read; | ||
3505 | |||
3506 | do { | ||
3507 | orig_read = readl(base + nv_registers_test[i].reg); | ||
3508 | |||
3509 | /* xor with mask to toggle bits */ | ||
3510 | orig_read ^= nv_registers_test[i].mask; | ||
3511 | |||
3512 | writel(orig_read, base + nv_registers_test[i].reg); | ||
3513 | |||
3514 | new_read = readl(base + nv_registers_test[i].reg); | ||
3515 | |||
3516 | if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) | ||
3517 | return 0; | ||
3518 | |||
3519 | /* restore original value */ | ||
3520 | orig_read ^= nv_registers_test[i].mask; | ||
3521 | writel(orig_read, base + nv_registers_test[i].reg); | ||
3522 | |||
3523 | } while (nv_registers_test[++i].reg != 0); | ||
3524 | |||
3525 | return 1; | ||
3526 | } | ||
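/* Walks the nv_registers_test[] table (terminated by a zero reg),
 * toggling each register's maskable bits and checking that the new
 * value reads back; the original contents are restored afterwards. */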
3527 | |||
3528 | static int nv_interrupt_test(struct net_device *dev) | ||
3529 | { | ||
3530 | struct fe_priv *np = netdev_priv(dev); | ||
2704 | u8 __iomem *base = get_hwbase(dev); | 3531 | u8 __iomem *base = get_hwbase(dev); |
2705 | int ret = 1; | 3532 | int ret = 1; |
2706 | int i; | 3533 | int testcnt; |
3534 | u32 save_msi_flags, save_poll_interval = 0; | ||
2707 | 3535 | ||
2708 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | 3536 | if (netif_running(dev)) { |
2709 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | 3537 | /* free current irq */ |
2710 | np->msi_x_entry[i].entry = i; | 3538 | nv_free_irq(dev); |
3539 | save_poll_interval = readl(base+NvRegPollingInterval); | ||
3540 | } | ||
3541 | |||
3542 | /* flag to test interrupt handler */ | ||
3543 | np->intr_test = 0; | ||
3544 | |||
3545 | /* setup test irq */ | ||
3546 | save_msi_flags = np->msi_flags; | ||
3547 | np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; | ||
3548 | np->msi_flags |= 0x001; /* setup 1 vector */ | ||
3549 | if (nv_request_irq(dev, 1)) | ||
3550 | return 0; | ||
3551 | |||
3552 | /* setup timer interrupt */ | ||
3553 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | ||
3554 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | ||
3555 | |||
3556 | nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); | ||
3557 | |||
3558 | /* wait for at least one interrupt */ | ||
3559 | msleep(100); | ||
3560 | |||
3561 | spin_lock_irq(&np->lock); | ||
3562 | |||
3563 | /* flag should be set within ISR */ | ||
3564 | testcnt = np->intr_test; | ||
3565 | if (!testcnt) | ||
3566 | ret = 2; | ||
3567 | |||
3568 | nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); | ||
3569 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | ||
3570 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | ||
3571 | else | ||
3572 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | ||
3573 | |||
3574 | spin_unlock_irq(&np->lock); | ||
3575 | |||
3576 | nv_free_irq(dev); | ||
3577 | |||
3578 | np->msi_flags = save_msi_flags; | ||
3579 | |||
3580 | if (netif_running(dev)) { | ||
3581 | writel(save_poll_interval, base + NvRegPollingInterval); | ||
3582 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | ||
3583 | /* restore original irq */ | ||
3584 | if (nv_request_irq(dev, 0)) | ||
3585 | return 0; | ||
3586 | } | ||
3587 | |||
3588 | return ret; | ||
3589 | } | ||
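/* Return convention: 1 = pass, 2 = no timer interrupt seen within the
 * 100ms window, 0 = irq setup failed and the caller must bail out. */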
3590 | |||
3591 | static int nv_loopback_test(struct net_device *dev) | ||
3592 | { | ||
3593 | struct fe_priv *np = netdev_priv(dev); | ||
3594 | u8 __iomem *base = get_hwbase(dev); | ||
3595 | struct sk_buff *tx_skb, *rx_skb; | ||
3596 | dma_addr_t test_dma_addr; | ||
3597 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | ||
3598 | u32 Flags; | ||
3599 | int len, i, pkt_len; | ||
3600 | u8 *pkt_data; | ||
3601 | u32 filter_flags = 0; | ||
3602 | u32 misc1_flags = 0; | ||
3603 | int ret = 1; | ||
3604 | |||
3605 | if (netif_running(dev)) { | ||
3606 | nv_disable_irq(dev); | ||
3607 | filter_flags = readl(base + NvRegPacketFilterFlags); | ||
3608 | misc1_flags = readl(base + NvRegMisc1); | ||
3609 | } else { | ||
3610 | nv_txrx_reset(dev); | ||
3611 | } | ||
3612 | |||
3613 | /* reinit driver view of the rx queue */ | ||
3614 | set_bufsize(dev); | ||
3615 | nv_init_ring(dev); | ||
3616 | |||
3617 | /* setup hardware for loopback */ | ||
3618 | writel(NVREG_MISC1_FORCE, base + NvRegMisc1); | ||
3619 | writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); | ||
3620 | |||
3621 | /* reinit nic view of the rx queue */ | ||
3622 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | ||
3623 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | ||
3624 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | ||
3625 | base + NvRegRingSizes); | ||
3626 | pci_push(base); | ||
3627 | |||
3628 | /* restart rx engine */ | ||
3629 | nv_start_rx(dev); | ||
3630 | nv_start_tx(dev); | ||
3631 | |||
3632 | /* setup packet for tx */ | ||
3633 | pkt_len = ETH_DATA_LEN; | ||
3634 | tx_skb = dev_alloc_skb(pkt_len); | ||
3635 | pkt_data = skb_put(tx_skb, pkt_len); | ||
3636 | for (i = 0; i < pkt_len; i++) | ||
3637 | pkt_data[i] = (u8)(i & 0xff); | ||
3638 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | ||
3639 | tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE); /* tx buffer: device reads, matching the unmap below */ | ||
3640 | |||
3641 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
3642 | np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr); | ||
3643 | np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | ||
3644 | } else { | ||
3645 | np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32; | ||
3646 | np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; | ||
3647 | np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | ||
3648 | } | ||
3649 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
3650 | pci_push(get_hwbase(dev)); | ||
3651 | |||
3652 | msleep(500); | ||
3653 | |||
3654 | /* check for rx of the packet */ | ||
3655 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
3656 | Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); | ||
3657 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); | ||
3658 | |||
3659 | } else { | ||
3660 | Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); | ||
3661 | len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); | ||
3662 | } | ||
3663 | |||
3664 | if (Flags & NV_RX_AVAIL) { | ||
3665 | ret = 0; | ||
3666 | } else if (np->desc_ver == DESC_VER_1) { | ||
3667 | if (Flags & NV_RX_ERROR) | ||
3668 | ret = 0; | ||
3669 | } else { | ||
3670 | if (Flags & NV_RX2_ERROR) { | ||
3671 | ret = 0; | ||
2711 | } | 3672 | } |
2712 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | 3673 | } |
2713 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2714 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | ||
2715 | /* Request irq for rx handling */ | ||
2716 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2717 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2718 | pci_disable_msix(np->pci_dev); | ||
2719 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2720 | goto out_err; | ||
2721 | } | ||
2722 | /* Request irq for tx handling */ | ||
2723 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2724 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2725 | pci_disable_msix(np->pci_dev); | ||
2726 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2727 | goto out_free_rx; | ||
2728 | } | ||
2729 | /* Request irq for link and timer handling */ | ||
2730 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2731 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2732 | pci_disable_msix(np->pci_dev); | ||
2733 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2734 | goto out_free_tx; | ||
2735 | } | ||
2736 | /* map interrupts to their respective vector */ | ||
2737 | writel(0, base + NvRegMSIXMap0); | ||
2738 | writel(0, base + NvRegMSIXMap1); | ||
2739 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2740 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2741 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2742 | } else { | ||
2743 | /* Request irq for all interrupts */ | ||
2744 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2745 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2746 | pci_disable_msix(np->pci_dev); | ||
2747 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2748 | goto out_err; | ||
2749 | } | ||
2750 | 3674 | ||
2751 | /* map interrupts to vector 0 */ | 3675 | if (ret) { |
2752 | writel(0, base + NvRegMSIXMap0); | 3676 | if (len != pkt_len) { |
2753 | writel(0, base + NvRegMSIXMap1); | 3677 | ret = 0; |
3678 | dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", | ||
3679 | dev->name, len, pkt_len); | ||
3680 | } else { | ||
3681 | rx_skb = np->rx_skbuff[0]; | ||
3682 | for (i = 0; i < pkt_len; i++) { | ||
3683 | if (rx_skb->data[i] != (u8)(i & 0xff)) { | ||
3684 | ret = 0; | ||
3685 | dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", | ||
3686 | dev->name, i); | ||
3687 | break; | ||
3688 | } | ||
2754 | } | 3689 | } |
2755 | } | 3690 | } |
3691 | } else { | ||
3692 | dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); | ||
2756 | } | 3693 | } |
2757 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | 3694 | |
2758 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | 3695 | pci_unmap_page(np->pci_dev, test_dma_addr, |
2759 | np->msi_flags |= NV_MSI_ENABLED; | 3696 | tx_skb->end-tx_skb->data, |
2760 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | 3697 | PCI_DMA_TODEVICE); |
2761 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | 3698 | dev_kfree_skb_any(tx_skb); |
2762 | pci_disable_msi(np->pci_dev); | 3699 | |
2763 | np->msi_flags &= ~NV_MSI_ENABLED; | 3700 | /* stop engines */ |
2764 | goto out_err; | 3701 | nv_stop_rx(dev); |
3702 | nv_stop_tx(dev); | ||
3703 | nv_txrx_reset(dev); | ||
3704 | /* drain rx queue */ | ||
3705 | nv_drain_rx(dev); | ||
3706 | nv_drain_tx(dev); | ||
3707 | |||
3708 | if (netif_running(dev)) { | ||
3709 | writel(misc1_flags, base + NvRegMisc1); | ||
3710 | writel(filter_flags, base + NvRegPacketFilterFlags); | ||
3711 | nv_enable_irq(dev); | ||
3712 | } | ||
3713 | |||
3714 | return ret; | ||
3715 | } | ||
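/* The loopback test sends one ETH_DATA_LEN frame with the MAC in
 * internal loopback (NVREG_PFF_LOOPBACK) and verifies both the length
 * and an incrementing byte pattern on the receive side. */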
3716 | |||
3717 | static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) | ||
3718 | { | ||
3719 | struct fe_priv *np = netdev_priv(dev); | ||
3720 | u8 __iomem *base = get_hwbase(dev); | ||
3721 | int result; | ||
3722 | memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); | ||
3723 | |||
3724 | if (!nv_link_test(dev)) { | ||
3725 | test->flags |= ETH_TEST_FL_FAILED; | ||
3726 | buffer[0] = 1; | ||
3727 | } | ||
3728 | |||
3729 | if (test->flags & ETH_TEST_FL_OFFLINE) { | ||
3730 | if (netif_running(dev)) { | ||
3731 | netif_stop_queue(dev); | ||
3732 | spin_lock_bh(&dev->xmit_lock); | ||
3733 | spin_lock_irq(&np->lock); | ||
3734 | nv_disable_hw_interrupts(dev, np->irqmask); | ||
3735 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | ||
3736 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | ||
3737 | } else { | ||
3738 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | ||
2765 | } | 3739 | } |
3740 | /* stop engines */ | ||
3741 | nv_stop_rx(dev); | ||
3742 | nv_stop_tx(dev); | ||
3743 | nv_txrx_reset(dev); | ||
3744 | /* drain rx queue */ | ||
3745 | nv_drain_rx(dev); | ||
3746 | nv_drain_tx(dev); | ||
3747 | spin_unlock_irq(&np->lock); | ||
3748 | spin_unlock_bh(&dev->xmit_lock); | ||
3749 | } | ||
2766 | 3750 | ||
2767 | /* map interrupts to vector 0 */ | 3751 | if (!nv_register_test(dev)) { |
2768 | writel(0, base + NvRegMSIMap0); | 3752 | test->flags |= ETH_TEST_FL_FAILED; |
2769 | writel(0, base + NvRegMSIMap1); | 3753 | buffer[1] = 1; |
2770 | /* enable msi vector 0 */ | 3754 | } |
2771 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | 3755 | |
3756 | result = nv_interrupt_test(dev); | ||
3757 | if (result != 1) { | ||
3758 | test->flags |= ETH_TEST_FL_FAILED; | ||
3759 | buffer[2] = 1; | ||
3760 | } | ||
3761 | if (result == 0) { | ||
3762 | /* bail out */ | ||
3763 | return; | ||
3764 | } | ||
3765 | |||
3766 | if (!nv_loopback_test(dev)) { | ||
3767 | test->flags |= ETH_TEST_FL_FAILED; | ||
3768 | buffer[3] = 1; | ||
3769 | } | ||
3770 | |||
3771 | if (netif_running(dev)) { | ||
3772 | /* reinit driver view of the rx queue */ | ||
3773 | set_bufsize(dev); | ||
3774 | if (nv_init_ring(dev)) { | ||
3775 | if (!np->in_shutdown) | ||
3776 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
3777 | } | ||
3778 | /* reinit nic view of the rx queue */ | ||
3779 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | ||
3780 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | ||
3781 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | ||
3782 | base + NvRegRingSizes); | ||
3783 | pci_push(base); | ||
3784 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
3785 | pci_push(base); | ||
3786 | /* restart rx engine */ | ||
3787 | nv_start_rx(dev); | ||
3788 | nv_start_tx(dev); | ||
3789 | netif_start_queue(dev); | ||
3790 | nv_enable_hw_interrupts(dev, np->irqmask); | ||
2772 | } | 3791 | } |
2773 | } | 3792 | } |
2774 | if (ret != 0) { | 3793 | } |
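/* "ethtool -t ethX offline" quiesces the device, runs the register,
 * interrupt and loopback checks, then rebuilds the rx queue and
 * restarts; the online variant reports only link state. */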
2775 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | ||
2776 | goto out_err; | ||
2777 | } | ||
2778 | 3794 | ||
2779 | return 0; | 3795 | static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) |
2780 | out_free_tx: | 3796 | { |
2781 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | 3797 | switch (stringset) { |
2782 | out_free_rx: | 3798 | case ETH_SS_STATS: |
2783 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | 3799 | memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); |
2784 | out_err: | 3800 | break; |
2785 | return 1; | 3801 | case ETH_SS_TEST: |
3802 | memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); | ||
3803 | break; | ||
3804 | } | ||
2786 | } | 3805 | } |
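/* Both string tables must stay in the same order as the stats struct
 * and the test buffer indices filled in by nv_self_test(). */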
2787 | 3806 | ||
2788 | static void nv_free_irq(struct net_device *dev) | 3807 | static struct ethtool_ops ops = { |
3808 | .get_drvinfo = nv_get_drvinfo, | ||
3809 | .get_link = ethtool_op_get_link, | ||
3810 | .get_wol = nv_get_wol, | ||
3811 | .set_wol = nv_set_wol, | ||
3812 | .get_settings = nv_get_settings, | ||
3813 | .set_settings = nv_set_settings, | ||
3814 | .get_regs_len = nv_get_regs_len, | ||
3815 | .get_regs = nv_get_regs, | ||
3816 | .nway_reset = nv_nway_reset, | ||
3817 | .get_perm_addr = ethtool_op_get_perm_addr, | ||
3818 | .get_tso = ethtool_op_get_tso, | ||
3819 | .set_tso = nv_set_tso, | ||
3820 | .get_ringparam = nv_get_ringparam, | ||
3821 | .set_ringparam = nv_set_ringparam, | ||
3822 | .get_pauseparam = nv_get_pauseparam, | ||
3823 | .set_pauseparam = nv_set_pauseparam, | ||
3824 | .get_rx_csum = nv_get_rx_csum, | ||
3825 | .set_rx_csum = nv_set_rx_csum, | ||
3826 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
3827 | .set_tx_csum = nv_set_tx_csum, | ||
3828 | .get_sg = ethtool_op_get_sg, | ||
3829 | .set_sg = nv_set_sg, | ||
3830 | .get_strings = nv_get_strings, | ||
3831 | .get_stats_count = nv_get_stats_count, | ||
3832 | .get_ethtool_stats = nv_get_ethtool_stats, | ||
3833 | .self_test_count = nv_self_test_count, | ||
3834 | .self_test = nv_self_test, | ||
3835 | }; | ||
3836 | |||
3837 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
2789 | { | 3838 | { |
2790 | struct fe_priv *np = get_nvpriv(dev); | 3839 | struct fe_priv *np = get_nvpriv(dev); |
2791 | int i; | ||
2792 | 3840 | ||
2793 | if (np->msi_flags & NV_MSI_X_ENABLED) { | 3841 | spin_lock_irq(&np->lock); |
2794 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | 3842 | |
2795 | free_irq(np->msi_x_entry[i].vector, dev); | 3843 | /* save vlan group */ |
2796 | } | 3844 | np->vlangrp = grp; |
2797 | pci_disable_msix(np->pci_dev); | 3845 | |
2798 | np->msi_flags &= ~NV_MSI_X_ENABLED; | 3846 | if (grp) { |
3847 | /* enable vlan on MAC */ | ||
3848 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | ||
2799 | } else { | 3849 | } else { |
2800 | free_irq(np->pci_dev->irq, dev); | 3850 | /* disable vlan on MAC */ |
2801 | if (np->msi_flags & NV_MSI_ENABLED) { | 3851 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; |
2802 | pci_disable_msi(np->pci_dev); | 3852 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; |
2803 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2804 | } | ||
2805 | } | 3853 | } |
2806 | } | 3854 | |
3855 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
3856 | |||
3857 | spin_unlock_irq(&np->lock); | ||
3858 | } | ||
3859 | |||
3860 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
3861 | { | ||
3862 | /* nothing to do */ | ||
3863 | } | ||
2807 | 3864 | ||
2808 | static int nv_open(struct net_device *dev) | 3865 | static int nv_open(struct net_device *dev) |
2809 | { | 3866 | { |
@@ -2829,6 +3886,9 @@ static int nv_open(struct net_device *dev) | |||
2829 | 3886 | ||
2830 | writel(0, base + NvRegAdapterControl); | 3887 | writel(0, base + NvRegAdapterControl); |
2831 | 3888 | ||
3889 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) | ||
3890 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | ||
3891 | |||
2832 | /* 2) initialize descriptor rings */ | 3892 | /* 2) initialize descriptor rings */ |
2833 | set_bufsize(dev); | 3893 | set_bufsize(dev); |
2834 | oom = nv_init_ring(dev); | 3894 | oom = nv_init_ring(dev); |
@@ -2845,7 +3905,7 @@ static int nv_open(struct net_device *dev) | |||
2845 | 3905 | ||
2846 | /* 4) give hw rings */ | 3906 | /* 4) give hw rings */ |
2847 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | 3907 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
2848 | writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), | 3908 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
2849 | base + NvRegRingSizes); | 3909 | base + NvRegRingSizes); |
2850 | 3910 | ||
2851 | /* 5) continue setup */ | 3911 | /* 5) continue setup */ |
@@ -2887,7 +3947,8 @@ static int nv_open(struct net_device *dev) | |||
2887 | base + NvRegAdapterControl); | 3947 | base + NvRegAdapterControl); |
2888 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); | 3948 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); |
2889 | writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); | 3949 | writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); |
2890 | writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags); | 3950 | if (np->wolenabled) |
3951 | writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); | ||
2891 | 3952 | ||
2892 | i = readl(base + NvRegPowerState); | 3953 | i = readl(base + NvRegPowerState); |
2893 | if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) | 3954 | if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) |
@@ -2903,7 +3964,7 @@ static int nv_open(struct net_device *dev) | |||
2903 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 3964 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
2904 | pci_push(base); | 3965 | pci_push(base); |
2905 | 3966 | ||
2906 | if (nv_request_irq(dev)) { | 3967 | if (nv_request_irq(dev, 0)) { |
2907 | goto out_drain; | 3968 | goto out_drain; |
2908 | } | 3969 | } |
2909 | 3970 | ||
@@ -2940,6 +4001,11 @@ static int nv_open(struct net_device *dev) | |||
2940 | } | 4001 | } |
2941 | if (oom) | 4002 | if (oom) |
2942 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 4003 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
4004 | |||
4005 | /* start statistics timer */ | ||
4006 | if (np->driver_data & DEV_HAS_STATISTICS) | ||
4007 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | ||
4008 | |||
2943 | spin_unlock_irq(&np->lock); | 4009 | spin_unlock_irq(&np->lock); |
2944 | 4010 | ||
2945 | return 0; | 4011 | return 0; |
@@ -2960,6 +4026,7 @@ static int nv_close(struct net_device *dev) | |||
2960 | 4026 | ||
2961 | del_timer_sync(&np->oom_kick); | 4027 | del_timer_sync(&np->oom_kick); |
2962 | del_timer_sync(&np->nic_poll); | 4028 | del_timer_sync(&np->nic_poll); |
4029 | del_timer_sync(&np->stats_poll); | ||
2963 | 4030 | ||
2964 | netif_stop_queue(dev); | 4031 | netif_stop_queue(dev); |
2965 | spin_lock_irq(&np->lock); | 4032 | spin_lock_irq(&np->lock); |
@@ -3019,6 +4086,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
3019 | init_timer(&np->nic_poll); | 4086 | init_timer(&np->nic_poll); |
3020 | np->nic_poll.data = (unsigned long) dev; | 4087 | np->nic_poll.data = (unsigned long) dev; |
3021 | np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ | 4088 | np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ |
4089 | init_timer(&np->stats_poll); | ||
4090 | np->stats_poll.data = (unsigned long) dev; | ||
4091 | np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ | ||
3022 | 4092 | ||
3023 | err = pci_enable_device(pci_dev); | 4093 | err = pci_enable_device(pci_dev); |
3024 | if (err) { | 4094 | if (err) { |
@@ -3033,7 +4103,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
3033 | if (err < 0) | 4103 | if (err < 0) |
3034 | goto out_disable; | 4104 | goto out_disable; |
3035 | 4105 | ||
3036 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL)) | 4106 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) |
3037 | np->register_size = NV_PCI_REGSZ_VER2; | 4107 | np->register_size = NV_PCI_REGSZ_VER2; |
3038 | else | 4108 | else |
3039 | np->register_size = NV_PCI_REGSZ_VER1; | 4109 | np->register_size = NV_PCI_REGSZ_VER1; |
@@ -3065,16 +4135,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
-		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
-			       pci_name(pci_dev));
-		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-		}
-		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-			       pci_name(pci_dev));
+		if (dma_64bit) {
+			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+				       pci_name(pci_dev));
+			} else {
+				dev->features |= NETIF_F_HIGHDMA;
+				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			}
+			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
+				       pci_name(pci_dev));
+			}
 		}
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
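
A minimal sketch (assuming the 2.6.16-era PCI DMA API) of the "opt in, then fall back" pattern the hunk above gates behind the new dma_64bit parameter; my_probe_dma() and want_64bit are illustrative names, not forcedeth's.

	#include <linux/pci.h>
	#include <linux/netdevice.h>
	#include <linux/dma-mapping.h>

	static void my_probe_dma(struct pci_dev *pdev, struct net_device *dev,
				 int want_64bit)
	{
		if (!want_64bit)
			return;	/* keep the default 32-bit mask */

		if (pci_set_dma_mask(pdev, DMA_39BIT_MASK) == 0)
			/* streaming (packet) buffers may now live above 4GB */
			dev->features |= NETIF_F_HIGHDMA;

		/* descriptor rings are a separate consistent allocation; if
		 * widening this mask fails, the rings simply stay below 4GB,
		 * which is always safe */
		pci_set_consistent_dma_mask(pdev, DMA_39BIT_MASK);
	}

Note the two masks are independent: HIGHDMA only asserts that transmit/receive buffers can be mapped above 4GB, while the consistent mask covers the ring memory itself.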
@@ -3107,13 +4179,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	np->msi_flags = 0;
-	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+	if ((id->driver_data & DEV_HAS_MSI) && msi) {
 		np->msi_flags |= NV_MSI_CAPABLE;
 	}
-	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
 		np->msi_flags |= NV_MSI_X_CAPABLE;
 	}
 
+	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
+	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
+		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
+	}
+
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
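
The pause_flags word above separates "the chip can do it" from "we want it on", plus an autoneg bit; ethtool can then flip the request bits without losing track of capability. A hedged recap of that composition follows; the MY_PAUSE_* values are illustrative, not the driver's actual NV_PAUSEFRAME_* definitions.

	#define MY_PAUSE_RX_CAPABLE	0x0001	/* hardware supports rx pause */
	#define MY_PAUSE_TX_CAPABLE	0x0002	/* hardware supports tx pause */
	#define MY_PAUSE_RX_REQ		0x0004	/* rx pause requested */
	#define MY_PAUSE_TX_REQ		0x0008	/* tx pause requested */
	#define MY_PAUSE_AUTONEG	0x0010	/* negotiate pause with peer */

	u32 pause_flags = MY_PAUSE_RX_CAPABLE | MY_PAUSE_RX_REQ | MY_PAUSE_AUTONEG;
	if (chip_can_tx_pause)	/* e.g. id->driver_data & DEV_HAS_PAUSEFRAME_TX */
		pause_flags |= MY_PAUSE_TX_CAPABLE | MY_PAUSE_TX_REQ;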
@@ -3122,21 +4200,38 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	dev->irq = pci_dev->irq;
 
+	np->rx_ring_size = RX_RING_DEFAULT;
+	np->tx_ring_size = TX_RING_DEFAULT;
+	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
-					sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					&np->ring_addr);
 		if (!np->rx_ring.orig)
 			goto out_unmap;
-		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
-					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
 					&np->ring_addr);
 		if (!np->rx_ring.ex)
 			goto out_unmap;
-		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
 	}
+	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
+	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
+	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
+	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
+	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
+	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+		goto out_freering;
+	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
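
The hunk above replaces the compile-time RX_RING/TX_RING constants with runtime ring sizes while keeping the original layout: one consistent DMA block holding rx + tx descriptors back to back. A minimal sketch of that layout, under the assumption of the 2.6-era pci_alloc_consistent() API; my_ring_desc and my_alloc_rings are illustrative names.

	#include <linux/pci.h>
	#include <linux/types.h>

	struct my_ring_desc { u32 buf; u32 flaglen; };	/* illustrative descriptor */

	static int my_alloc_rings(struct pci_dev *pdev, unsigned int rx_n,
				  unsigned int tx_n, struct my_ring_desc **rx,
				  struct my_ring_desc **tx, dma_addr_t *ring_addr)
	{
		/* single allocation: rx descriptors first, tx right after */
		*rx = pci_alloc_consistent(pdev,
				sizeof(struct my_ring_desc) * (rx_n + tx_n),
				ring_addr);
		if (!*rx)
			return -ENOMEM;
		*tx = &(*rx)[rx_n];	/* tx ring starts at index rx_n */
		return 0;
	}

As a side note, kcalloc() could fold each kmalloc()+memset() pair for the bookkeeping arrays into one zeroed allocation; the diff keeps the two steps explicit.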
@@ -3258,9 +4353,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (i == 33) {
 		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
 		       pci_name(pci_dev));
-		goto out_freering;
+		goto out_error;
 	}
 
 	/* reset it */
 	phy_init(dev);
 
@@ -3272,7 +4367,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
-		goto out_error;
+		goto out_error;
 	}
 	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
 	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
@@ -3280,14 +4375,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	return 0;
 
-out_freering:
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				    np->rx_ring.orig, np->ring_addr);
-	else
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
-				    np->rx_ring.ex, np->ring_addr);
+out_error:
 	pci_set_drvdata(pci_dev, NULL);
+out_freering:
+	free_rings(dev);
 out_unmap:
 	iounmap(get_hwbase(dev));
 out_relreg:
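
The relabelled error path above keeps the standard layered-goto unwind: each label undoes exactly the setup that succeeded before the failure, and a new out_error label also clears drvdata for failures after the rings exist. A generic sketch of the idiom, with all helper names (my_setup_rings, my_teardown_rings, my_register) hypothetical:

	static int my_probe(struct pci_dev *pdev)
	{
		int err;

		err = pci_enable_device(pdev);
		if (err)
			goto out;
		err = pci_request_regions(pdev, "mydrv");
		if (err)
			goto out_disable;
		err = my_setup_rings(pdev);	/* hypothetical helper */
		if (err)
			goto out_relreg;
		err = my_register(pdev);	/* hypothetical helper */
		if (err)
			goto out_freering;
		return 0;

	out_freering:
		my_teardown_rings(pdev);	/* hypothetical, mirrors free_rings() */
	out_relreg:
		pci_release_regions(pdev);
	out_disable:
		pci_disable_device(pdev);
	out:
		return err;
	}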
@@ -3303,15 +4394,11 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
-	struct fe_priv *np = netdev_priv(dev);
 
 	unregister_netdev(dev);
 
 	/* free all structures */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
-	else
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
+	free_rings(dev);
 	iounmap(get_hwbase(dev));
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -3374,11 +4461,43 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP61 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP61 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP61 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP61 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP65 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP65 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP65 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+	},
+	{ /* MCP65 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{0,},
 };
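
For context, this is how a pci_device_id table's driver_data feature mask reaches the driver: the PCI core matches an entry and passes it to probe(), which tests the flags. A minimal sketch; the device id 0x0268 and the MY_FEAT_* values are illustrative, not forcedeth's.

	#include <linux/module.h>
	#include <linux/pci.h>

	#define MY_FEAT_MSI		0x0040	/* illustrative flag values */
	#define MY_FEAT_STATISTICS	0x0400

	static struct pci_device_id my_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0268),	/* example id */
		  .driver_data = MY_FEAT_MSI | MY_FEAT_STATISTICS, },
		{ 0, },	/* terminator */
	};
	MODULE_DEVICE_TABLE(pci, my_tbl);

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		if (id->driver_data & MY_FEAT_STATISTICS)
			;	/* arm the hw statistics timer, as nv_open does */
		return 0;
	}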
@@ -3408,10 +4527,12 @@ module_param(optimization_mode, int, 0);
 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
 module_param(poll_interval, int, 0);
 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
-module_param(disable_msix, int, 0);
-MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(msix, int, 0);
+MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(dma_64bit, int, 0);
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
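
The hunk above flips the parameters from negative sense (disable_msi=1) to positive sense (msi=0), which reads more naturally on the command line. The module_param() lines reference variables declared elsewhere in the file; a sketch of that missing half, where the default values shown are assumptions:

	static int msi = 1;		/* assumed default: MSI enabled */
	static int msix = 1;		/* assumed default: MSI-X enabled */
	static int dma_64bit = 1;	/* assumed default: high DMA enabled */

With these positive-sense parameters, "modprobe forcedeth msi=0 dma_64bit=0" would turn both features off at load time.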