author		David S. Miller <davem@davemloft.net>	2008-05-13 04:09:15 -0400
committer	David S. Miller <davem@davemloft.net>	2008-05-13 04:09:15 -0400
commit		47ac3199acb0676b58491b29418283f37a116952 (patch)
tree		42cbd2efc6aae4c52ce150938142523911a889c5
parent		608961a5eca8d3c6bd07172febc27b5559408c5d (diff)
parent		993245908ec35c071315479e20602577b7b5dde6 (diff)
Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
40 files changed, 3081 insertions, 547 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c3a533d5d382..0cc47b9942b1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2104,12 +2104,10 @@ L: netdev@vger.kernel.org
 S:	Maintained
 
 INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
-P:	Auke Kok
-M:	auke-jan.h.kok@intel.com
-P:	Jesse Brandeburg
-M:	jesse.brandeburg@intel.com
 P:	Jeff Kirsher
 M:	jeffrey.t.kirsher@intel.com
+P:	Jesse Brandeburg
+M:	jesse.brandeburg@intel.com
 P:	Bruce Allan
 M:	bruce.w.allan@intel.com
 P:	John Ronciak
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ce6de5a7e28..937e8258981d 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -53,11 +53,13 @@ int register_memory_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&memory_chain, nb);
 }
+EXPORT_SYMBOL(register_memory_notifier);
 
 void unregister_memory_notifier(struct notifier_block *nb)
 {
 	blocking_notifier_chain_unregister(&memory_chain, nb);
 }
+EXPORT_SYMBOL(unregister_memory_notifier);
 
 /*
  * register_memory - Setup a sysfs device for a memory block
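Exporting these two entry points is what lets a modular driver subscribe to memory hotplug events; the eHEA changes later in this merge are the first such user. The minimal usage shape, mirroring the ehea hunk below (my_mem_event/my_mem_nb are illustrative names):

	#include <linux/memory.h>	/* MEM_OFFLINE, [un]register_memory_notifier() */
	#include <linux/notifier.h>

	static int my_mem_event(struct notifier_block *nb,
				unsigned long action, void *data)
	{
		if (action == MEM_OFFLINE)
			;	/* react to removed memory here */
		return NOTIFY_OK;
	}

	static struct notifier_block my_mem_nb = {
		.notifier_call = my_mem_event,
	};

	/* register_memory_notifier(&my_mem_nb) at module init,
	 * unregister_memory_notifier(&my_mem_nb) at module exit */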
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d27f54a2df77..9f6cc8a56073 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2426,7 +2426,7 @@ config CHELSIO_T3
 
 config EHEA
 	tristate "eHEA Ethernet support"
-	depends on IBMEBUS && INET && SPARSEMEM
+	depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
 	select INET_LRO
 	---help---
 	  This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0afe522b8f7b..9c2394d49428 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
@@ -36,7 +36,6 @@
  * A very incomplete list of things that need to be dealt with:
  *
  * TODO:
- * Wake on LAN.
  * Add more ethtool functions.
  * Fix abstruse irq enable/disable condition described here:
  *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
@@ -638,21 +637,18 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
 }
 
 /*
- *TODO: do something or get rid of this
+ * Force the PHY into power saving mode using vendor magic.
  */
 #ifdef CONFIG_PM
-static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
 {
-/*    s32 ret_val;
- *    u16 phy_data;
- */
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0);
 
-/*
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ....
-*/
-	return 0;
 }
 #endif
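The replacement body programs the PHY through the indirect debug-register pair defined in atlx.h further down: a write to MII_DBG_ADDR (0x1D) selects an internal PHY register, and the following write to MII_DBG_DATA (0x1E) stores the value. Factored into a helper for clarity (hypothetical helper name; the driver writes the pairs inline):

	static void atl1_write_phy_dbg(struct atl1_hw *hw, u16 dbg_reg, u16 val)
	{
		atl1_write_phy_reg(hw, MII_DBG_ADDR, dbg_reg);	/* select */
		atl1_write_phy_reg(hw, MII_DBG_DATA, val);	/* write */
	}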
@@ -2784,64 +2780,93 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
+	u32 val;
+	int retval;
+	u16 speed;
+	u16 duplex;
 
 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		atl1_down(adapter);
 
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
-	if (ctrl & BMSR_LSTATUS)
+	val = ctrl & BMSR_LSTATUS;
+	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;
 
-	/* reduce speed to 10/100M */
-	if (wufc) {
-		atl1_phy_enter_power_saving(hw);
-		/* if resume, let driver to re- setup link */
-		hw->phy_configured = false;
-		atl1_set_mac_addr(hw);
-		atlx_set_multi(netdev);
+	if (val && wufc) {
+		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+		if (val) {
+			if (netif_msg_ifdown(adapter))
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"error getting speed/duplex\n");
+			goto disable_wol;
+		}
 
 		ctrl = 0;
-		/* turn on magic packet wol */
-		if (wufc & ATLX_WUFC_MAG)
-			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
 
-		/* turn on Link change WOL */
-		if (wufc & ATLX_WUFC_LNKC)
-			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		/* enable magic packet WOL */
+		if (wufc & ATLX_WUFC_MAG)
+			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
 
-		/* turn on all-multi mode if wake on multicast is enabled */
-		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_DBG;
-		ctrl &= ~MAC_CTRL_PROMIS_EN;
-		if (wufc & ATLX_WUFC_MC)
-			ctrl |= MAC_CTRL_MC_ALL_EN;
-		else
-			ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
-		/* turn on broadcast mode if wake on-BC is enabled */
-		if (wufc & ATLX_WUFC_BC)
+		/* configure the mac */
+		ctrl = MAC_CTRL_RX_EN;
+		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
+			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
+		if (duplex == FULL_DUPLEX)
+			ctrl |= MAC_CTRL_DUPLX;
+		ctrl |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+		if (adapter->vlgrp)
+			ctrl |= MAC_CTRL_RMV_VLAN;
+		if (wufc & ATLX_WUFC_MAG)
 			ctrl |= MAC_CTRL_BC_EN;
-		else
-			ctrl &= ~MAC_CTRL_BC_EN;
-
-		/* enable RX */
-		ctrl |= MAC_CTRL_RX_EN;
 		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-	} else {
-		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+
+		/* poke the PHY */
+		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
 	}
 
-	pci_save_state(pdev);
+	if (!val && wufc) {
+		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
+		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+		hw->phy_configured = false;
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
+	}
+
+disable_wol:
+	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+	ioread32(hw->hw_addr + REG_WOL_CTRL);
+	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	atl1_phy_enter_power_saving(hw);
+	hw->phy_configured = false;
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+exit:
+	if (netif_running(netdev))
+		pci_disable_msi(adapter->pdev);
 	pci_disable_device(pdev);
-
-	pci_set_power_state(pdev, PCI_D3hot);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
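Two things stand out in the rewritten suspend path. First, it now distinguishes three cases: link up with WOL requested (arm magic-packet wake and keep the receiver on at the negotiated speed), link down with WOL requested (arm link-change wake only), and everything else (clear the WOL registers and drop the PHY into power saving). Second, every iowrite32() is now followed by an ioread32() of the same register; MMIO writes can be posted, and the read back forces them to reach the device before the system sleeps. The idiom as a helper (hypothetical name, the driver open-codes it):

	static inline void atl1_write_flush(struct atl1_hw *hw, u32 reg, u32 val)
	{
		iowrite32(val, hw->hw_addr + reg);
		ioread32(hw->hw_addr + reg);	/* flush the posted write */
	}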
@@ -2855,20 +2880,26 @@ static int atl1_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	/* FIXME: check and handle */
 	err = pci_enable_device(pdev);
+	if (err) {
+		if (netif_msg_ifup(adapter))
+			dev_printk(KERN_DEBUG, &pdev->dev,
+				"error enabling pci device\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	atl1_reset(adapter);
+	atl1_reset_hw(&adapter->hw);
+	adapter->cmb.cmb->int_stats = 0;
 
 	if (netif_running(netdev))
 		atl1_up(adapter);
 	netif_device_attach(netdev);
 
-	atl1_via_workaround(adapter);
-
 	return 0;
 }
 #else
@@ -2876,6 +2907,13 @@ static int atl1_resume(struct pci_dev *pdev)
 #define atl1_resume NULL
 #endif
 
+static void atl1_shutdown(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PM
+	atl1_suspend(pdev, PMSG_SUSPEND);
+#endif
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void atl1_poll_controller(struct net_device *netdev)
 {
@@ -3122,7 +3160,8 @@ static struct pci_driver atl1_driver = {
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
 	.suspend = atl1_suspend,
-	.resume = atl1_resume
+	.resume = atl1_resume,
+	.shutdown = atl1_shutdown
 };
 
 /*
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 51893d66eae1..a5015b14a429 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f06b854e2501..b3e7fcf0f6e7 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 3be7c09734d4..297a03da6b7f 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 
-#define ATLX_DRIVER_VERSION "2.1.1"
+#define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
 	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
@@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 #define MII_ATLX_PSSR_100MBS	0x4000	/* 01=100Mbs */
 #define MII_ATLX_PSSR_1000MBS	0x8000	/* 10=1000Mbs */
 
+#define MII_DBG_ADDR		0x1D
+#define MII_DBG_DATA		0x1E
+
 /* PCI Command Register Bit Definitions */
 #define PCI_REG_COMMAND		0x04	/* PCI Command Register */
 #define CMD_IO_SPACE		0x0001
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 4fdb13f8447b..acebe431d068 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,6 +71,7 @@ enum {				/* adapter flags */
 	USING_MSIX = (1 << 2),
 	QUEUES_BOUND = (1 << 3),
 	TP_PARITY_INIT = (1 << 4),
+	NAPI_INIT = (1 << 5),
 };
 
 struct fl_pg_chunk {
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 91ee7277b813..579bee42a5cb 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 		    int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
 void t3_led_ready(struct adapter *adapter);
 void t3_fatal_err(struct adapter *adapter);
 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ce949d5fae39..3a3127216791 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap)
 		netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
 			       64);
 	}
+
+	/*
+	 * netif_napi_add() can be called only once per napi_struct because it
+	 * adds each new napi_struct to a list.  Be careful not to call it a
+	 * second time, e.g., during EEH recovery, by making a note of it.
+	 */
+	adap->flags |= NAPI_INIT;
 }
 
 /*
@@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap)
 			goto out;
 
 		setup_rss(adap);
-		init_napi(adap);
+		if (!(adap->flags & NAPI_INIT))
+			init_napi(adap);
 		adap->flags |= FULL_INIT_DONE;
 	}
 
@@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev)
 		return 0;
 
 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
-		return err;
+		goto out;
 
 	t3_tp_set_offload_mode(adapter, 1);
 	tdev->lldev = adapter->port[0];
@@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev)
 	int other_ports = adapter->open_device_map & PORT_MASK;
 	int err;
 
-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
-		quiesce_rx(adapter);
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
 		return err;
-	}
 
 	set_bit(pi->port_id, &adapter->open_device_map);
 	if (is_offload(adapter) && !ofld_disable) {
@@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
 		offload_close(&adapter->tdev);
 
-	/* Free sge resources */
-	t3_free_sge_resources(adapter);
-
 	adapter->flags &= ~FULL_INIT_DONE;
 
 	pci_disable_device(pdev);
 
-	/* Request a slot slot reset. */
+	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
+		goto err;
 	}
 	pci_set_master(pdev);
+	pci_restore_state(pdev);
 
-	t3_prep_adapter(adapter, adapter->params.info, 1);
+	/* Free sge resources */
+	t3_free_sge_resources(adapter);
+
+	if (t3_replay_prep_adapter(adapter))
+		goto err;
 
 	return PCI_ERS_RESULT_RECOVERED;
+err:
+	return PCI_ERS_RESULT_DISCONNECT;
 }
 
@@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev)
 			netif_device_attach(netdev);
 		}
 	}
-
-	if (is_offload(adapter)) {
-		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
-		if (offload_open(adapter->port[0]))
-			printk(KERN_WARNING
-			       "Could not bring back offload capabilities\n");
-	}
 }
 
 static struct pci_error_handlers t3_err_handler = {
@@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 	}
 
 	pci_set_master(pdev);
+	pci_save_state(pdev);
 
 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
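All of the cxgb3_main.c hunks serve one goal: making EEH (PCI error) recovery actually restore the adapter. The callbacks being reworked are wired up through the pci_error_handlers table already present in this file (visible as context above):

	static struct pci_error_handlers t3_err_handler = {
		.error_detected = t3_io_error_detected,	/* quiesce, ask for reset */
		.slot_reset = t3_io_slot_reset,		/* re-enable and re-init */
		.resume = t3_io_resume,			/* reattach interfaces */
	};

The new pci_save_state() in init_one() captures the config space that pci_restore_state() replays in t3_io_slot_reset(); freeing the SGE resources moves from error_detected to slot_reset so that a recovered adapter can rebuild them.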
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 02dbbb300929..567178879345 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -444,6 +444,14 @@
 
 #define A_PCIE_CFG 0x88
 
+#define S_ENABLELINKDWNDRST    21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST    20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+
 #define S_PCIE_CLIDECEN    16
 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
 #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
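The cxgb3 register headers follow a fixed naming scheme: S_FOO is a field's bit offset, V_FOO(x) shifts a value into the field, and F_FOO is the one-bit flag form V_FOO(1U). Setting both new bits, as config_pcie() does in the t3_hw.c hunk below, therefore expands to:

	/* F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST
	 *	== (1U << 21) | (1U << 20) == 0x00300000 */
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST);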
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 98a6bbd11d4c..796eb305cdc3 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -539,6 +539,31 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
 }
 
 /**
+ *	t3_reset_qset - reset a sge qset
+ *	@q: the queue set
+ *
+ *	Reset the qset structure.
+ *	the NAPI structure is preserved in the event of
+ *	the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+	if (q->adap &&
+	    !(q->adap->flags & NAPI_INIT)) {
+		memset(q, 0, sizeof(*q));
+		return;
+	}
+
+	q->adap = NULL;
+	memset(&q->rspq, 0, sizeof(q->rspq));
+	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+	q->txq_stopped = 0;
+	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+}
+
+
+/**
  *	free_qset - free the resources of an SGE queue set
  *	@adapter: the adapter owning the queue set
  *	@q: the queue set
@@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
 				  q->rspq.desc, q->rspq.phys_addr);
 	}
 
-	memset(q, 0, sizeof(*q));
+	t3_reset_qset(q);
 }
 
 /**
@@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data)
  */
 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 {
-	int     ret;
+	int ret;
 	local_bh_disable();
 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
 	local_bh_enable();
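t3_reset_qset() exists because of the netif_napi_add() constraint documented in the cxgb3_main.c hunk above: on first bring-up (NAPI_INIT not yet set) the whole qset may be zeroed, but on a re-init during EEH recovery the embedded napi_struct must survive, since it is already linked into the netdev's NAPI list. Hence the field-by-field clearing; summarizing the function body above:

	/* cleared:   adap, rspq, fl[], txq[], txq_stopped, tx_reclaim_timer
	 * preserved: qs->napi (registered once, must not be re-added) */
	t3_reset_qset(q);	/* called from t3_free_qset() instead of memset() */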
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index a99496a431c4..d405a932c73a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap)
 
 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
 }
 
@@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter)
 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
 			 F_GPIO0_OUT_VAL);
 }
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+	const struct adapter_info *ai = adapter->params.info;
+	unsigned int i, j = 0;
+	int ret;
+
+	early_hw_init(adapter, ai);
+	ret = init_parity(adapter);
+	if (ret)
+		return ret;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = adap2pinfo(adapter, i);
+		while (!adapter->params.vpd.port_type[j])
+			++j;
+
+		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+				       ai->mdio_ops);
+
+		p->phy.ops->power_down(&p->phy, 1);
+		++j;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index e6fe2614ea6d..d45bcd2660af 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -117,6 +117,9 @@ typedef struct board_info {
 
 	struct mutex	 addr_lock;	/* phy and eeprom access lock */
 
+	struct delayed_work phy_poll;
+	struct net_device  *ndev;
+
 	spinlock_t	lock;
 
 	struct mii_if_info mii;
@@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
 	}
 }
 
+static void dm9000_schedule_poll(board_info_t *db)
+{
+	schedule_delayed_work(&db->phy_poll, HZ * 2);
+}
 
 /* Our watchdog timed out. Called by the networking layer */
 static void dm9000_timeout(struct net_device *dev)
@@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 	.set_eeprom	= dm9000_set_eeprom,
 };
 
+static void
+dm9000_poll_work(struct work_struct *w)
+{
+	struct delayed_work *dw = container_of(w, struct delayed_work, work);
+	board_info_t *db = container_of(dw, board_info_t, phy_poll);
+
+	mii_check_media(&db->mii, netif_msg_link(db), 0);
+
+	if (netif_running(db->ndev))
+		dm9000_schedule_poll(db);
+}
 
 /* dm9000_release_board
  *
@@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
 /*
  * Search DM9000 board, allocate space and register it
  */
-static int
+static int __devinit
 dm9000_probe(struct platform_device *pdev)
 {
 	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev)
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	dev_dbg(&pdev->dev, "dm9000_probe()");
+	dev_dbg(&pdev->dev, "dm9000_probe()\n");
 
 	/* setup board info structure */
 	db = (struct board_info *) ndev->priv;
 	memset(db, 0, sizeof (*db));
 
 	db->dev = &pdev->dev;
+	db->ndev = ndev;
 
 	spin_lock_init(&db->lock);
 	mutex_init(&db->addr_lock);
 
+	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
+
+
 	if (pdev->num_resources < 2) {
 		ret = -ENODEV;
 		goto out;
@@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev)
 
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
+
+	dm9000_schedule_poll(db);
 
 	return 0;
 }
@@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev)
 	if (netif_msg_ifdown(db))
 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
+	cancel_delayed_work(&db->phy_poll);
+
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
@@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
 	spin_unlock_irqrestore(&db->lock,flags);
 
 	mutex_unlock(&db->addr_lock);
+
+	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
 	return ret;
 }
 
@@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
 	unsigned long flags;
 	unsigned long reg_save;
 
+	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
 	mutex_lock(&db->addr_lock);
 
 	spin_lock_irqsave(&db->lock,flags);
@@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev)
 	return 0;
 }
 
-static int
+static int __devexit
 dm9000_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = {
 		.owner	 = THIS_MODULE,
 	},
 	.probe   = dm9000_probe,
-	.remove  = dm9000_drv_remove,
+	.remove  = __devexit_p(dm9000_drv_remove),
 	.suspend = dm9000_drv_suspend,
 	.resume  = dm9000_drv_resume,
 };
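The new PHY poll runs every two seconds on the shared workqueue. Two details are easy to miss. dm9000_poll_work() recovers the driver state from the bare work_struct with two container_of() steps, since the work_struct is embedded in a delayed_work which is in turn embedded in board_info:

	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);

And dm9000_stop() uses cancel_delayed_work(), which only cancels a not-yet-running instance; the netif_running(db->ndev) check inside the handler is what keeps a concurrently executing poll from rescheduling itself once the interface is down.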
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f5dacceab95b..fe872fbd671e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0090"
+#define DRV_VERSION	"EHEA_0091"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -118,6 +118,13 @@
 #define EHEA_MR_ACC_CTRL       0x00800000
 
 #define EHEA_BUSMAP_START      0x8000000000000000ULL
+#define EHEA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
+#define EHEA_DIR_INDEX_SHIFT   13	/* 8k Entries in 64k block */
+#define EHEA_TOP_INDEX_SHIFT   (EHEA_DIR_INDEX_SHIFT * 2)
+#define EHEA_MAP_ENTRIES       (1 << EHEA_DIR_INDEX_SHIFT)
+#define EHEA_MAP_SIZE          (0x10000)	/* currently fixed map size */
+#define EHEA_INDEX_MASK        (EHEA_MAP_ENTRIES - 1)
+
 
 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ
 
@@ -192,10 +199,20 @@ struct h_epas {
 				   set to 0 if unused */
 };
 
-struct ehea_busmap {
-	unsigned int entries;		/* total number of entries */
-	unsigned int valid_sections;	/* number of valid sections */
-	u64 *vaddr;
+/*
+ * Memory map data structures
+ */
+struct ehea_dir_bmap
+{
+	u64 ent[EHEA_MAP_ENTRIES];
+};
+struct ehea_top_bmap
+{
+	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
+};
+struct ehea_bmap
+{
+	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
 };
 
 struct ehea_qp;
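The flat per-section array is replaced by a two-level radix tree keyed on the memory section number: with EHEA_DIR_INDEX_SHIFT = 13, every level indexes EHEA_MAP_ENTRIES = 8192 slots, so a section index splits into top bits, directory bits [25:13] and entry bits [12:0]. A worked example using only the defines above (ehea_calc_index() is added in ehea_qmr.c below):

	/* section index 32778 == 0x800A:
	 *	top = 32778 >> 26	   = 0
	 *	dir = (32778 >> 13) & 8191 = 4
	 *	idx = 32778 & 8191	   = 10
	 * so its bus address lives in top[0]->dir[4]->ent[10] */
	top = ehea_calc_index(index, EHEA_TOP_INDEX_SHIFT);
	dir = ehea_calc_index(index, EHEA_DIR_INDEX_SHIFT);
	idx = index & EHEA_INDEX_MASK;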
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f9bc21c74b59..d1b6d4e7495d 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 #include <asm/kexec.h>
 #include <linux/mutex.h>
 
@@ -3503,6 +3504,24 @@ void ehea_crash_handler(void)
 					     0, H_DEREG_BCMC);
 }
 
+static int ehea_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	switch (action) {
+	case MEM_OFFLINE:
+		ehea_info("memory has been removed");
+		ehea_rereg_mrs(NULL);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehea_mem_nb = {
+	.notifier_call = ehea_mem_notifier,
+};
+
 static int ehea_reboot_notifier(struct notifier_block *nb,
 				unsigned long action, void *unused)
 {
@@ -3581,6 +3600,10 @@ int __init ehea_module_init(void)
 	if (ret)
 		ehea_info("failed registering reboot notifier");
 
+	ret = register_memory_notifier(&ehea_mem_nb);
+	if (ret)
+		ehea_info("failed registering memory remove notifier");
+
 	ret = crash_shutdown_register(&ehea_crash_handler);
 	if (ret)
 		ehea_info("failed registering crash handler");
@@ -3604,6 +3627,7 @@ int __init ehea_module_init(void)
 out3:
 	ibmebus_unregister_driver(&ehea_driver);
 out2:
+	unregister_memory_notifier(&ehea_mem_nb);
 	unregister_reboot_notifier(&ehea_reboot_nb);
 	crash_shutdown_unregister(&ehea_crash_handler);
 out:
@@ -3621,6 +3645,7 @@ static void __exit ehea_module_exit(void)
 	ret = crash_shutdown_unregister(&ehea_crash_handler);
 	if (ret)
 		ehea_info("failed unregistering crash handler");
+	unregister_memory_notifier(&ehea_mem_nb);
 	kfree(ehea_fw_handles.arr);
 	kfree(ehea_bcmc_regs.arr);
 	ehea_destroy_busmap();
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index d522e905f460..140f05baafd8 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,8 +31,8 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+struct ehea_bmap *ehea_bmap = NULL;
 
-struct ehea_busmap ehea_bmap = { 0, 0, NULL };
 
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 	return 0;
 }
 
-int ehea_create_busmap(void)
+static inline int ehea_calc_index(unsigned long i, unsigned long s)
 {
-	u64 vaddr = EHEA_BUSMAP_START;
-	unsigned long high_section_index = 0;
-	int i;
+	return (i >> s) & EHEA_INDEX_MASK;
+}
 
-	/*
-	 * Sections are not in ascending order -> Loop over all sections and
-	 * find the highest PFN to compute the required map size.
-	 */
-	ehea_bmap.valid_sections = 0;
+static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
+				     int dir)
+{
+	if(!ehea_top_bmap->dir[dir]) {
+		ehea_top_bmap->dir[dir] =
+			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
+		if (!ehea_top_bmap->dir[dir])
+			return -ENOMEM;
+	}
+	return 0;
+}
 
-	for (i = 0; i < NR_MEM_SECTIONS; i++)
-		if (valid_section_nr(i))
-			high_section_index = i;
+static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
+{
+	if(!ehea_bmap->top[top]) {
+		ehea_bmap->top[top] =
+			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
+		if (!ehea_bmap->top[top])
+			return -ENOMEM;
+	}
+	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
+}
 
-	ehea_bmap.entries = high_section_index + 1;
-	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+static int ehea_create_busmap_callback(unsigned long pfn,
+				       unsigned long nr_pages, void *arg)
+{
+	unsigned long i, mr_len, start_section, end_section;
+	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+	mr_len = *(unsigned long *)arg;
 
-	if (!ehea_bmap.vaddr)
+	ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+	if (!ehea_bmap)
 		return -ENOMEM;
 
-	for (i = 0 ; i < ehea_bmap.entries; i++) {
-		unsigned long pfn = section_nr_to_pfn(i);
+	for (i = start_section; i < end_section; i++) {
+		int ret;
+		int top, dir, idx;
+		u64 vaddr;
+
+		top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+		dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+
+		ret = ehea_init_bmap(ehea_bmap, top, dir);
+		if(ret)
+			return ret;
 
-		if (pfn_valid(pfn)) {
-			ehea_bmap.vaddr[i] = vaddr;
-			vaddr += EHEA_SECTSIZE;
-			ehea_bmap.valid_sections++;
-		} else
-			ehea_bmap.vaddr[i] = 0;
+		idx = i & EHEA_INDEX_MASK;
+		vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
+
+		ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
 	}
 
+	mr_len += nr_pages * PAGE_SIZE;
+	*(unsigned long *)arg = mr_len;
+
 	return 0;
 }
 
+static unsigned long ehea_mr_len;
+
+static DEFINE_MUTEX(ehea_busmap_mutex);
+
+int ehea_create_busmap(void)
+{
+	int ret;
+	mutex_lock(&ehea_busmap_mutex);
+	ehea_mr_len = 0;
+	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+				   ehea_create_busmap_callback);
+	mutex_unlock(&ehea_busmap_mutex);
+	return ret;
+}
+
 void ehea_destroy_busmap(void)
 {
-	vfree(ehea_bmap.vaddr);
+	int top, dir;
+	mutex_lock(&ehea_busmap_mutex);
+	if (!ehea_bmap)
+		goto out_destroy;
+
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		if (!ehea_bmap->top[top])
+			continue;
+
+		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+			if (!ehea_bmap->top[top]->dir[dir])
+				continue;
+
+			kfree(ehea_bmap->top[top]->dir[dir]);
+		}
+
+		kfree(ehea_bmap->top[top]);
+	}
+
+	kfree(ehea_bmap);
+	ehea_bmap = NULL;
+out_destroy:
+	mutex_unlock(&ehea_busmap_mutex);
 }
 
 u64 ehea_map_vaddr(void *caddr)
 {
-	u64 mapped_addr;
-	unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
-
-	if (likely(index < ehea_bmap.entries)) {
-		mapped_addr = ehea_bmap.vaddr[index];
-		if (likely(mapped_addr))
-			mapped_addr |= (((unsigned long)caddr)
-					& (EHEA_SECTSIZE - 1));
-		else
-			mapped_addr = -1;
-	} else
-		mapped_addr = -1;
-
-	if (unlikely(mapped_addr == -1))
-		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-			schedule_work(&ehea_rereg_mr_task);
-
-	return mapped_addr;
+	int top, dir, idx;
+	unsigned long index, offset;
+
+	if (!ehea_bmap)
+		return EHEA_INVAL_ADDR;
+
+	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top])
+		return EHEA_INVAL_ADDR;
+
+	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir])
+		return EHEA_INVAL_ADDR;
+
+	idx = index & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+		return EHEA_INVAL_ADDR;
+
+	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
+
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+	unsigned long ret = idx;
+	ret |= dir << EHEA_DIR_INDEX_SHIFT;
+	ret |= top << EHEA_TOP_INDEX_SHIFT;
+	return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+			       struct ehea_adapter *adapter,
+			       struct ehea_mr *mr)
+{
+	void *pg;
+	u64 j, m, hret;
+	unsigned long k = 0;
+	u64 pt_abs = virt_to_abs(pt);
+
+	void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+			pg = sectbase + ((k++) * EHEA_PAGESIZE);
+			pt[m] = virt_to_abs(pg);
+		}
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+						0, pt_abs, EHEA_MAX_RPAGE);
+
+		if ((hret != H_SUCCESS)
+		    && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource(adapter->handle, mr->handle,
+					     FORCE_FREE);
+			ehea_error("register_rpage_mr failed");
+			return hret;
+		}
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+				struct ehea_adapter *adapter,
+				struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int idx;
+
+	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+			continue;
+
+		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+				    struct ehea_adapter *adapter,
+				    struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int dir;
+
+	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+		if (!ehea_bmap->top[top]->dir[dir])
+			continue;
+
+		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
 	int ret;
 	u64 *pt;
-	void *pg;
-	u64 hret, pt_abs, i, j, m, mr_len;
+	u64 hret;
 	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
+	unsigned long top;
 
 	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
 		goto out;
 	}
-	pt_abs = virt_to_abs(pt);
 
-	hret = ehea_h_alloc_resource_mr(adapter->handle,
-					EHEA_BUSMAP_START, mr_len,
-					acc_ctrl, adapter->pd,
+	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
 					&mr->handle, &mr->lkey);
+
 	if (hret != H_SUCCESS) {
 		ehea_error("alloc_resource_mr failed");
 		ret = -EIO;
 		goto out;
 	}
 
-	for (i = 0 ; i < ehea_bmap.entries; i++)
-		if (ehea_bmap.vaddr[i]) {
-			void *sectbase = __va(i << SECTION_SIZE_BITS);
-			unsigned long k = 0;
-
-			for (j = 0; j < (EHEA_PAGES_PER_SECTION /
-					 EHEA_MAX_RPAGE); j++) {
-
-				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
-					pg = sectbase + ((k++) * EHEA_PAGESIZE);
-					pt[m] = virt_to_abs(pg);
-				}
-
-				hret = ehea_h_register_rpage_mr(adapter->handle,
-								mr->handle,
-								0, 0, pt_abs,
-								EHEA_MAX_RPAGE);
-				if ((hret != H_SUCCESS)
-				    && (hret != H_PAGE_REGISTERED)) {
-					ehea_h_free_resource(adapter->handle,
-							     mr->handle,
-							     FORCE_FREE);
-					ehea_error("register_rpage_mr failed");
-					ret = -EIO;
-					goto out;
-				}
-			}
-		}
+	if (!ehea_bmap) {
+		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+		ehea_error("no busmap available");
+		ret = -EIO;
+		goto out;
+	}
+
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		if (!ehea_bmap->top[top])
+			continue;
+
+		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+		if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+			break;
+	}
 
 	if (hret != H_SUCCESS) {
 		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
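Two details of the new lookup path are worth flagging. EHEA_INVAL_ADDR is all-ones, bit-identical to the -1 the old ehea_map_vaddr() returned, so callers that test for the old sentinel keep working; that equivalence can even be checked at compile time:

	BUILD_BUG_ON(EHEA_INVAL_ADDR != ~0ULL);	/* same pattern as old -1 */

And ehea_calc_sectbase() is the exact inverse of the top/dir/idx split: it recombines the three indices into a section number and converts it back to a kernel virtual address, which is what lets registration walk only the populated entries of the sparse map.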
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6f22f068d6ee..25bdd0832df5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -635,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv)
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
 		}
+
+		txbdp++;
 	}
 
 	kfree(priv->tx_skbuff);
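The bug here: the loop walks the tx ring by index but never advanced txbdp, so every iteration inspected the first buffer descriptor. Abridged shape of the loop after the fix (simplified sketch; the real free_skb_resources() also unmaps the DMA buffers, and the field names are assumed from this driver):

	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
		txbdp++;	/* the added line: step to the next descriptor */
	}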
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index ef63c8d2bd7e..c91b12ea26ad 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
| @@ -144,11 +144,13 @@ struct myri10ge_tx_buf { | |||
| 144 | char *req_bytes; | 144 | char *req_bytes; |
| 145 | struct myri10ge_tx_buffer_state *info; | 145 | struct myri10ge_tx_buffer_state *info; |
| 146 | int mask; /* number of transmit slots -1 */ | 146 | int mask; /* number of transmit slots -1 */ |
| 147 | int boundary; /* boundary transmits cannot cross */ | ||
| 148 | int req ____cacheline_aligned; /* transmit slots submitted */ | 147 | int req ____cacheline_aligned; /* transmit slots submitted */ |
| 149 | int pkt_start; /* packets started */ | 148 | int pkt_start; /* packets started */ |
| 149 | int stop_queue; | ||
| 150 | int linearized; | ||
| 150 | int done ____cacheline_aligned; /* transmit slots completed */ | 151 | int done ____cacheline_aligned; /* transmit slots completed */ |
| 151 | int pkt_done; /* packets completed */ | 152 | int pkt_done; /* packets completed */ |
| 153 | int wake_queue; | ||
| 152 | }; | 154 | }; |
| 153 | 155 | ||
| 154 | struct myri10ge_rx_done { | 156 | struct myri10ge_rx_done { |
| @@ -160,29 +162,50 @@ struct myri10ge_rx_done { | |||
| 160 | struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; | 162 | struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; |
| 161 | }; | 163 | }; |
| 162 | 164 | ||
| 163 | struct myri10ge_priv { | 165 | struct myri10ge_slice_netstats { |
| 164 | int running; /* running? */ | 166 | unsigned long rx_packets; |
| 165 | int csum_flag; /* rx_csums? */ | 167 | unsigned long tx_packets; |
| 168 | unsigned long rx_bytes; | ||
| 169 | unsigned long tx_bytes; | ||
| 170 | unsigned long rx_dropped; | ||
| 171 | unsigned long tx_dropped; | ||
| 172 | }; | ||
| 173 | |||
| 174 | struct myri10ge_slice_state { | ||
| 166 | struct myri10ge_tx_buf tx; /* transmit ring */ | 175 | struct myri10ge_tx_buf tx; /* transmit ring */ |
| 167 | struct myri10ge_rx_buf rx_small; | 176 | struct myri10ge_rx_buf rx_small; |
| 168 | struct myri10ge_rx_buf rx_big; | 177 | struct myri10ge_rx_buf rx_big; |
| 169 | struct myri10ge_rx_done rx_done; | 178 | struct myri10ge_rx_done rx_done; |
| 179 | struct net_device *dev; | ||
| 180 | struct napi_struct napi; | ||
| 181 | struct myri10ge_priv *mgp; | ||
| 182 | struct myri10ge_slice_netstats stats; | ||
| 183 | __be32 __iomem *irq_claim; | ||
| 184 | struct mcp_irq_data *fw_stats; | ||
| 185 | dma_addr_t fw_stats_bus; | ||
| 186 | int watchdog_tx_done; | ||
| 187 | int watchdog_tx_req; | ||
| 188 | }; | ||
| 189 | |||
| 190 | struct myri10ge_priv { | ||
| 191 | struct myri10ge_slice_state ss; | ||
| 192 | int tx_boundary; /* boundary transmits cannot cross */ | ||
| 193 | int running; /* running? */ | ||
| 194 | int csum_flag; /* rx_csums? */ | ||
| 170 | int small_bytes; | 195 | int small_bytes; |
| 171 | int big_bytes; | 196 | int big_bytes; |
| 197 | int max_intr_slots; | ||
| 172 | struct net_device *dev; | 198 | struct net_device *dev; |
| 173 | struct napi_struct napi; | ||
| 174 | struct net_device_stats stats; | 199 | struct net_device_stats stats; |
| 200 | spinlock_t stats_lock; | ||
| 175 | u8 __iomem *sram; | 201 | u8 __iomem *sram; |
| 176 | int sram_size; | 202 | int sram_size; |
| 177 | unsigned long board_span; | 203 | unsigned long board_span; |
| 178 | unsigned long iomem_base; | 204 | unsigned long iomem_base; |
| 179 | __be32 __iomem *irq_claim; | ||
| 180 | __be32 __iomem *irq_deassert; | 205 | __be32 __iomem *irq_deassert; |
| 181 | char *mac_addr_string; | 206 | char *mac_addr_string; |
| 182 | struct mcp_cmd_response *cmd; | 207 | struct mcp_cmd_response *cmd; |
| 183 | dma_addr_t cmd_bus; | 208 | dma_addr_t cmd_bus; |
| 184 | struct mcp_irq_data *fw_stats; | ||
| 185 | dma_addr_t fw_stats_bus; | ||
| 186 | struct pci_dev *pdev; | 209 | struct pci_dev *pdev; |
| 187 | int msi_enabled; | 210 | int msi_enabled; |
| 188 | u32 link_state; | 211 | u32 link_state; |
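The struct reshuffle above is the heart of this myri10ge change: everything tied to one send/receive ring pair (the tx and rx rings, the completion queue, the NAPI context, the interrupt-claim register, firmware stats, and per-slice byte/packet counters) moves from `myri10ge_priv` into a new `struct myri10ge_slice_state`, and the priv embeds a single slice (`ss`) for now. That is groundwork for multi-slice operation, and it is why later hunks recover the slice from the NAPI context with `container_of()`. A compilable sketch of that recovery pattern, using trimmed stand-in structs and a hand-rolled `container_of`:

```c
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct { int weight; };      /* trimmed stand-in */
struct myri10ge_priv;
struct myri10ge_slice_state {
	struct napi_struct napi;
	struct myri10ge_priv *mgp;       /* back-pointer to shared state */
};
struct myri10ge_priv { struct myri10ge_slice_state ss; };

int main(void)
{
	struct myri10ge_priv mgp;
	struct napi_struct *napi;
	struct myri10ge_slice_state *ss;

	mgp.ss.napi.weight = 64;
	mgp.ss.mgp = &mgp;

	/* what myri10ge_poll() now does on entry */
	napi = &mgp.ss.napi;
	ss = container_of(napi, struct myri10ge_slice_state, napi);
	printf("weight=%d, priv recovered: %s\n", ss->napi.weight,
	       ss->mgp == &mgp ? "yes" : "no");
	return 0;
}
```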
| @@ -191,20 +214,16 @@ struct myri10ge_priv { | |||
| 191 | __be32 __iomem *intr_coal_delay_ptr; | 214 | __be32 __iomem *intr_coal_delay_ptr; |
| 192 | int mtrr; | 215 | int mtrr; |
| 193 | int wc_enabled; | 216 | int wc_enabled; |
| 194 | int wake_queue; | ||
| 195 | int stop_queue; | ||
| 196 | int down_cnt; | 217 | int down_cnt; |
| 197 | wait_queue_head_t down_wq; | 218 | wait_queue_head_t down_wq; |
| 198 | struct work_struct watchdog_work; | 219 | struct work_struct watchdog_work; |
| 199 | struct timer_list watchdog_timer; | 220 | struct timer_list watchdog_timer; |
| 200 | int watchdog_tx_done; | ||
| 201 | int watchdog_tx_req; | ||
| 202 | int watchdog_pause; | ||
| 203 | int watchdog_resets; | 221 | int watchdog_resets; |
| 204 | int tx_linearized; | 222 | int watchdog_pause; |
| 205 | int pause; | 223 | int pause; |
| 206 | char *fw_name; | 224 | char *fw_name; |
| 207 | char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; | 225 | char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; |
| 226 | char *product_code_string; | ||
| 208 | char fw_version[128]; | 227 | char fw_version[128]; |
| 209 | int fw_ver_major; | 228 | int fw_ver_major; |
| 210 | int fw_ver_minor; | 229 | int fw_ver_minor; |
| @@ -228,58 +247,54 @@ static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; | |||
| 228 | 247 | ||
| 229 | static char *myri10ge_fw_name = NULL; | 248 | static char *myri10ge_fw_name = NULL; |
| 230 | module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); | 249 | module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); |
| 231 | MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); | 250 | MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); |
| 232 | 251 | ||
| 233 | static int myri10ge_ecrc_enable = 1; | 252 | static int myri10ge_ecrc_enable = 1; |
| 234 | module_param(myri10ge_ecrc_enable, int, S_IRUGO); | 253 | module_param(myri10ge_ecrc_enable, int, S_IRUGO); |
| 235 | MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); | 254 | MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); |
| 236 | |||
| 237 | static int myri10ge_max_intr_slots = 1024; | ||
| 238 | module_param(myri10ge_max_intr_slots, int, S_IRUGO); | ||
| 239 | MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n"); | ||
| 240 | 255 | ||
| 241 | static int myri10ge_small_bytes = -1; /* -1 == auto */ | 256 | static int myri10ge_small_bytes = -1; /* -1 == auto */ |
| 242 | module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); | 257 | module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); |
| 243 | MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); | 258 | MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); |
| 244 | 259 | ||
| 245 | static int myri10ge_msi = 1; /* enable msi by default */ | 260 | static int myri10ge_msi = 1; /* enable msi by default */ |
| 246 | module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); | 261 | module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); |
| 247 | MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); | 262 | MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts"); |
| 248 | 263 | ||
| 249 | static int myri10ge_intr_coal_delay = 75; | 264 | static int myri10ge_intr_coal_delay = 75; |
| 250 | module_param(myri10ge_intr_coal_delay, int, S_IRUGO); | 265 | module_param(myri10ge_intr_coal_delay, int, S_IRUGO); |
| 251 | MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); | 266 | MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); |
| 252 | 267 | ||
| 253 | static int myri10ge_flow_control = 1; | 268 | static int myri10ge_flow_control = 1; |
| 254 | module_param(myri10ge_flow_control, int, S_IRUGO); | 269 | module_param(myri10ge_flow_control, int, S_IRUGO); |
| 255 | MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); | 270 | MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); |
| 256 | 271 | ||
| 257 | static int myri10ge_deassert_wait = 1; | 272 | static int myri10ge_deassert_wait = 1; |
| 258 | module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); | 273 | module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); |
| 259 | MODULE_PARM_DESC(myri10ge_deassert_wait, | 274 | MODULE_PARM_DESC(myri10ge_deassert_wait, |
| 260 | "Wait when deasserting legacy interrupts\n"); | 275 | "Wait when deasserting legacy interrupts"); |
| 261 | 276 | ||
| 262 | static int myri10ge_force_firmware = 0; | 277 | static int myri10ge_force_firmware = 0; |
| 263 | module_param(myri10ge_force_firmware, int, S_IRUGO); | 278 | module_param(myri10ge_force_firmware, int, S_IRUGO); |
| 264 | MODULE_PARM_DESC(myri10ge_force_firmware, | 279 | MODULE_PARM_DESC(myri10ge_force_firmware, |
| 265 | "Force firmware to assume aligned completions\n"); | 280 | "Force firmware to assume aligned completions"); |
| 266 | 281 | ||
| 267 | static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; | 282 | static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; |
| 268 | module_param(myri10ge_initial_mtu, int, S_IRUGO); | 283 | module_param(myri10ge_initial_mtu, int, S_IRUGO); |
| 269 | MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); | 284 | MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); |
| 270 | 285 | ||
| 271 | static int myri10ge_napi_weight = 64; | 286 | static int myri10ge_napi_weight = 64; |
| 272 | module_param(myri10ge_napi_weight, int, S_IRUGO); | 287 | module_param(myri10ge_napi_weight, int, S_IRUGO); |
| 273 | MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); | 288 | MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); |
| 274 | 289 | ||
| 275 | static int myri10ge_watchdog_timeout = 1; | 290 | static int myri10ge_watchdog_timeout = 1; |
| 276 | module_param(myri10ge_watchdog_timeout, int, S_IRUGO); | 291 | module_param(myri10ge_watchdog_timeout, int, S_IRUGO); |
| 277 | MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); | 292 | MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); |
| 278 | 293 | ||
| 279 | static int myri10ge_max_irq_loops = 1048576; | 294 | static int myri10ge_max_irq_loops = 1048576; |
| 280 | module_param(myri10ge_max_irq_loops, int, S_IRUGO); | 295 | module_param(myri10ge_max_irq_loops, int, S_IRUGO); |
| 281 | MODULE_PARM_DESC(myri10ge_max_irq_loops, | 296 | MODULE_PARM_DESC(myri10ge_max_irq_loops, |
| 282 | "Set stuck legacy IRQ detection threshold\n"); | 297 | "Set stuck legacy IRQ detection threshold"); |
| 283 | 298 | ||
| 284 | #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK | 299 | #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK |
| 285 | 300 | ||
| @@ -289,21 +304,22 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); | |||
| 289 | 304 | ||
| 290 | static int myri10ge_lro = 1; | 305 | static int myri10ge_lro = 1; |
| 291 | module_param(myri10ge_lro, int, S_IRUGO); | 306 | module_param(myri10ge_lro, int, S_IRUGO); |
| 292 | MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); | 307 | MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload"); |
| 293 | 308 | ||
| 294 | static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; | 309 | static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; |
| 295 | module_param(myri10ge_lro_max_pkts, int, S_IRUGO); | 310 | module_param(myri10ge_lro_max_pkts, int, S_IRUGO); |
| 296 | MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); | 311 | MODULE_PARM_DESC(myri10ge_lro_max_pkts, |
| 312 | "Number of LRO packets to be aggregated"); | ||
| 297 | 313 | ||
| 298 | static int myri10ge_fill_thresh = 256; | 314 | static int myri10ge_fill_thresh = 256; |
| 299 | module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); | 315 | module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); |
| 300 | MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); | 316 | MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); |
| 301 | 317 | ||
| 302 | static int myri10ge_reset_recover = 1; | 318 | static int myri10ge_reset_recover = 1; |
| 303 | 319 | ||
| 304 | static int myri10ge_wcfifo = 0; | 320 | static int myri10ge_wcfifo = 0; |
| 305 | module_param(myri10ge_wcfifo, int, S_IRUGO); | 321 | module_param(myri10ge_wcfifo, int, S_IRUGO); |
| 306 | MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); | 322 | MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); |
| 307 | 323 | ||
| 308 | #define MYRI10GE_FW_OFFSET 1024*1024 | 324 | #define MYRI10GE_FW_OFFSET 1024*1024 |
| 309 | #define MYRI10GE_HIGHPART_TO_U32(X) \ | 325 | #define MYRI10GE_HIGHPART_TO_U32(X) \ |
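Two things happen in the parameter hunk above: the `myri10ge_max_intr_slots` module parameter is dropped in favor of a per-device `mgp->max_intr_slots` sized from the firmware (see `myri10ge_get_firmware_capabilities()` below), and every `MODULE_PARM_DESC` string loses its trailing `\n`, since modinfo already prints one description per line and the embedded newlines only produced blank lines in its output. A minimal module fragment showing the cleaned-up pattern, with a hypothetical parameter name:

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_delay = 75;
module_param(example_delay, int, S_IRUGO);
/* no trailing "\n": modinfo supplies its own line break */
MODULE_PARM_DESC(example_delay, "Interrupt coalescing delay");

MODULE_LICENSE("GPL");
```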
| @@ -359,8 +375,10 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, | |||
| 359 | for (sleep_total = 0; | 375 | for (sleep_total = 0; |
| 360 | sleep_total < 1000 | 376 | sleep_total < 1000 |
| 361 | && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); | 377 | && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); |
| 362 | sleep_total += 10) | 378 | sleep_total += 10) { |
| 363 | udelay(10); | 379 | udelay(10); |
| 380 | mb(); | ||
| 381 | } | ||
| 364 | } else { | 382 | } else { |
| 365 | /* use msleep for most command */ | 383 | /* use msleep for most command */ |
| 366 | for (sleep_total = 0; | 384 | for (sleep_total = 0; |
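The added `mb()` in the udelay poll loop above forces a fresh read of `response->result`, a word the NIC updates by DMA, on every pass; without a barrier the compiler or hardware is free to keep serving a stale cached value. A user-space analog using a compiler barrier (the kernel's `mb()` is additionally a hardware memory fence):

```c
#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static uint32_t response;       /* imagine a device DMA-ing into this word */

static int poll_response(void)
{
	int sleep_total;

	for (sleep_total = 0; sleep_total < 1000; sleep_total += 10) {
		if (response != 0)  /* stands in for the NO_RESPONSE check */
			return 0;
		/* like the added mb(): force a fresh load of 'response' */
		barrier();
	}
	return -1;
}

int main(void)
{
	printf("timed out: %d\n", poll_response());
	return 0;
}
```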
| @@ -420,6 +438,10 @@ static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp) | |||
| 420 | ptr += 1; | 438 | ptr += 1; |
| 421 | } | 439 | } |
| 422 | } | 440 | } |
| 441 | if (memcmp(ptr, "PC=", 3) == 0) { | ||
| 442 | ptr += 3; | ||
| 443 | mgp->product_code_string = ptr; | ||
| 444 | } | ||
| 423 | if (memcmp((const void *)ptr, "SN=", 3) == 0) { | 445 | if (memcmp((const void *)ptr, "SN=", 3) == 0) { |
| 424 | ptr += 3; | 446 | ptr += 3; |
| 425 | mgp->serial_number = simple_strtoul(ptr, &ptr, 10); | 447 | mgp->serial_number = simple_strtoul(ptr, &ptr, 10); |
| @@ -442,7 +464,7 @@ abort: | |||
| 442 | static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) | 464 | static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) |
| 443 | { | 465 | { |
| 444 | char __iomem *submit; | 466 | char __iomem *submit; |
| 445 | __be32 buf[16]; | 467 | __be32 buf[16] __attribute__ ((__aligned__(8))); |
| 446 | u32 dma_low, dma_high; | 468 | u32 dma_low, dma_high; |
| 447 | int i; | 469 | int i; |
| 448 | 470 | ||
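Both on-stack `buf[16]` command blocks in this patch (here and in `myri10ge_load_firmware()` below) gain an explicit `__attribute__((__aligned__(8)))`. The block is copied out to NIC memory in 64-bit chunks, and an 8-byte-aligned source keeps those copies naturally aligned; a plain `__be32` array is only guaranteed 4-byte alignment. A quick self-contained check of the idiom:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t buf[16] __attribute__((__aligned__(8)));

	printf("buf is 8-byte aligned: %s\n",
	       ((uintptr_t)buf % 8 == 0) ? "yes" : "no");
	return 0;
}
```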
| @@ -609,13 +631,38 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) | |||
| 609 | return status; | 631 | return status; |
| 610 | } | 632 | } |
| 611 | 633 | ||
| 634 | int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) | ||
| 635 | { | ||
| 636 | struct myri10ge_cmd cmd; | ||
| 637 | int status; | ||
| 638 | |||
| 639 | /* probe for IPv6 TSO support */ | ||
| 640 | mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; | ||
| 641 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, | ||
| 642 | &cmd, 0); | ||
| 643 | if (status == 0) { | ||
| 644 | mgp->max_tso6 = cmd.data0; | ||
| 645 | mgp->features |= NETIF_F_TSO6; | ||
| 646 | } | ||
| 647 | |||
| 648 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); | ||
| 649 | if (status != 0) { | ||
| 650 | dev_err(&mgp->pdev->dev, | ||
| 651 | "failed MXGEFW_CMD_GET_RX_RING_SIZE\n"); | ||
| 652 | return -ENXIO; | ||
| 653 | } | ||
| 654 | |||
| 655 | mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr)); | ||
| 656 | |||
| 657 | return 0; | ||
| 658 | } | ||
| 659 | |||
| 612 | static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | 660 | static int myri10ge_load_firmware(struct myri10ge_priv *mgp) |
| 613 | { | 661 | { |
| 614 | char __iomem *submit; | 662 | char __iomem *submit; |
| 615 | __be32 buf[16]; | 663 | __be32 buf[16] __attribute__ ((__aligned__(8))); |
| 616 | u32 dma_low, dma_high, size; | 664 | u32 dma_low, dma_high, size; |
| 617 | int status, i; | 665 | int status, i; |
| 618 | struct myri10ge_cmd cmd; | ||
| 619 | 666 | ||
| 620 | size = 0; | 667 | size = 0; |
| 621 | status = myri10ge_load_hotplug_firmware(mgp, &size); | 668 | status = myri10ge_load_hotplug_firmware(mgp, &size); |
| @@ -635,7 +682,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
| 635 | } | 682 | } |
| 636 | dev_info(&mgp->pdev->dev, | 683 | dev_info(&mgp->pdev->dev, |
| 637 | "Successfully adopted running firmware\n"); | 684 | "Successfully adopted running firmware\n"); |
| 638 | if (mgp->tx.boundary == 4096) { | 685 | if (mgp->tx_boundary == 4096) { |
| 639 | dev_warn(&mgp->pdev->dev, | 686 | dev_warn(&mgp->pdev->dev, |
| 640 | "Using firmware currently running on NIC" | 687 | "Using firmware currently running on NIC" |
| 641 | ". For optimal\n"); | 688 | ". For optimal\n"); |
| @@ -646,7 +693,9 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
| 646 | } | 693 | } |
| 647 | 694 | ||
| 648 | mgp->fw_name = "adopted"; | 695 | mgp->fw_name = "adopted"; |
| 649 | mgp->tx.boundary = 2048; | 696 | mgp->tx_boundary = 2048; |
| 697 | myri10ge_dummy_rdma(mgp, 1); | ||
| 698 | status = myri10ge_get_firmware_capabilities(mgp); | ||
| 650 | return status; | 699 | return status; |
| 651 | } | 700 | } |
| 652 | 701 | ||
| @@ -681,26 +730,18 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
| 681 | msleep(1); | 730 | msleep(1); |
| 682 | mb(); | 731 | mb(); |
| 683 | i = 0; | 732 | i = 0; |
| 684 | while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { | 733 | while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) { |
| 685 | msleep(1); | 734 | msleep(1 << i); |
| 686 | i++; | 735 | i++; |
| 687 | } | 736 | } |
| 688 | if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { | 737 | if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { |
| 689 | dev_err(&mgp->pdev->dev, "handoff failed\n"); | 738 | dev_err(&mgp->pdev->dev, "handoff failed\n"); |
| 690 | return -ENXIO; | 739 | return -ENXIO; |
| 691 | } | 740 | } |
| 692 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); | ||
| 693 | myri10ge_dummy_rdma(mgp, 1); | 741 | myri10ge_dummy_rdma(mgp, 1); |
| 742 | status = myri10ge_get_firmware_capabilities(mgp); | ||
| 694 | 743 | ||
| 695 | /* probe for IPv6 TSO support */ | 744 | return status; |
| 696 | mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; | ||
| 697 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, | ||
| 698 | &cmd, 0); | ||
| 699 | if (status == 0) { | ||
| 700 | mgp->max_tso6 = cmd.data0; | ||
| 701 | mgp->features |= NETIF_F_TSO6; | ||
| 702 | } | ||
| 703 | return 0; | ||
| 704 | } | 745 | } |
| 705 | 746 | ||
| 706 | static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) | 747 | static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) |
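The handoff hunk above makes two changes. First, the confirmation poll switches from twenty fixed 1 ms sleeps to exponential backoff, `msleep(1 << i)` for i = 0..8, which keeps the fast path fast but tolerates a total of 511 ms before declaring failure. Second, the TSO6 and ring-size probing that used to live inline here is factored into `myri10ge_get_firmware_capabilities()`, so the firmware-adoption path a few hunks up and this handoff path share it. The backoff arithmetic, checked in a one-liner:

```c
#include <stdio.h>

int main(void)
{
	int i, total_ms = 0;

	for (i = 0; i < 9; i++)
		total_ms += 1 << i;   /* msleep(1 << i): 1,2,4,...,256 ms */
	printf("worst-case wait: %d ms (old loop: 20 x 1 ms)\n", total_ms);
	return 0;
}
```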
| @@ -772,7 +813,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) | |||
| 772 | * transfers took to complete. | 813 | * transfers took to complete. |
| 773 | */ | 814 | */ |
| 774 | 815 | ||
| 775 | len = mgp->tx.boundary; | 816 | len = mgp->tx_boundary; |
| 776 | 817 | ||
| 777 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); | 818 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); |
| 778 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); | 819 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); |
| @@ -834,17 +875,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) | |||
| 834 | 875 | ||
| 835 | /* Now exchange information about interrupts */ | 876 | /* Now exchange information about interrupts */ |
| 836 | 877 | ||
| 837 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 878 | bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
| 838 | memset(mgp->rx_done.entry, 0, bytes); | 879 | memset(mgp->ss.rx_done.entry, 0, bytes); |
| 839 | cmd.data0 = (u32) bytes; | 880 | cmd.data0 = (u32) bytes; |
| 840 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); | 881 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); |
| 841 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | 882 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); |
| 842 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | 883 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); |
| 843 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); | 884 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); |
| 844 | 885 | ||
| 845 | status |= | 886 | status |= |
| 846 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); | 887 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); |
| 847 | mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); | 888 | mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); |
| 848 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, | 889 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, |
| 849 | &cmd, 0); | 890 | &cmd, 0); |
| 850 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); | 891 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); |
| @@ -858,17 +899,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) | |||
| 858 | } | 899 | } |
| 859 | put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); | 900 | put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); |
| 860 | 901 | ||
| 861 | memset(mgp->rx_done.entry, 0, bytes); | 902 | memset(mgp->ss.rx_done.entry, 0, bytes); |
| 862 | 903 | ||
| 863 | /* reset mcp/driver shared state back to 0 */ | 904 | /* reset mcp/driver shared state back to 0 */ |
| 864 | mgp->tx.req = 0; | 905 | mgp->ss.tx.req = 0; |
| 865 | mgp->tx.done = 0; | 906 | mgp->ss.tx.done = 0; |
| 866 | mgp->tx.pkt_start = 0; | 907 | mgp->ss.tx.pkt_start = 0; |
| 867 | mgp->tx.pkt_done = 0; | 908 | mgp->ss.tx.pkt_done = 0; |
| 868 | mgp->rx_big.cnt = 0; | 909 | mgp->ss.rx_big.cnt = 0; |
| 869 | mgp->rx_small.cnt = 0; | 910 | mgp->ss.rx_small.cnt = 0; |
| 870 | mgp->rx_done.idx = 0; | 911 | mgp->ss.rx_done.idx = 0; |
| 871 | mgp->rx_done.cnt = 0; | 912 | mgp->ss.rx_done.cnt = 0; |
| 872 | mgp->link_changes = 0; | 913 | mgp->link_changes = 0; |
| 873 | status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); | 914 | status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); |
| 874 | myri10ge_change_pause(mgp, mgp->pause); | 915 | myri10ge_change_pause(mgp, mgp->pause); |
| @@ -1020,9 +1061,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, | |||
| 1020 | * page into an skb */ | 1061 | * page into an skb */ |
| 1021 | 1062 | ||
| 1022 | static inline int | 1063 | static inline int |
| 1023 | myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | 1064 | myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, |
| 1024 | int bytes, int len, __wsum csum) | 1065 | int bytes, int len, __wsum csum) |
| 1025 | { | 1066 | { |
| 1067 | struct myri10ge_priv *mgp = ss->mgp; | ||
| 1026 | struct sk_buff *skb; | 1068 | struct sk_buff *skb; |
| 1027 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; | 1069 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; |
| 1028 | int i, idx, hlen, remainder; | 1070 | int i, idx, hlen, remainder; |
| @@ -1052,11 +1094,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
| 1052 | rx_frags[0].page_offset += MXGEFW_PAD; | 1094 | rx_frags[0].page_offset += MXGEFW_PAD; |
| 1053 | rx_frags[0].size -= MXGEFW_PAD; | 1095 | rx_frags[0].size -= MXGEFW_PAD; |
| 1054 | len -= MXGEFW_PAD; | 1096 | len -= MXGEFW_PAD; |
| 1055 | lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, | 1097 | lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, |
| 1056 | len, len, | 1098 | len, len, |
| 1057 | /* opaque, will come back in get_frag_header */ | 1099 | /* opaque, will come back in get_frag_header */ |
| 1058 | (void *)(__force unsigned long)csum, | 1100 | (void *)(__force unsigned long)csum, csum); |
| 1059 | csum); | ||
| 1060 | return 1; | 1101 | return 1; |
| 1061 | } | 1102 | } |
| 1062 | 1103 | ||
| @@ -1096,10 +1137,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
| 1096 | return 1; | 1137 | return 1; |
| 1097 | } | 1138 | } |
| 1098 | 1139 | ||
| 1099 | static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | 1140 | static inline void |
| 1141 | myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) | ||
| 1100 | { | 1142 | { |
| 1101 | struct pci_dev *pdev = mgp->pdev; | 1143 | struct pci_dev *pdev = ss->mgp->pdev; |
| 1102 | struct myri10ge_tx_buf *tx = &mgp->tx; | 1144 | struct myri10ge_tx_buf *tx = &ss->tx; |
| 1103 | struct sk_buff *skb; | 1145 | struct sk_buff *skb; |
| 1104 | int idx, len; | 1146 | int idx, len; |
| 1105 | 1147 | ||
| @@ -1117,8 +1159,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | |||
| 1117 | len = pci_unmap_len(&tx->info[idx], len); | 1159 | len = pci_unmap_len(&tx->info[idx], len); |
| 1118 | pci_unmap_len_set(&tx->info[idx], len, 0); | 1160 | pci_unmap_len_set(&tx->info[idx], len, 0); |
| 1119 | if (skb) { | 1161 | if (skb) { |
| 1120 | mgp->stats.tx_bytes += skb->len; | 1162 | ss->stats.tx_bytes += skb->len; |
| 1121 | mgp->stats.tx_packets++; | 1163 | ss->stats.tx_packets++; |
| 1122 | dev_kfree_skb_irq(skb); | 1164 | dev_kfree_skb_irq(skb); |
| 1123 | if (len) | 1165 | if (len) |
| 1124 | pci_unmap_single(pdev, | 1166 | pci_unmap_single(pdev, |
| @@ -1134,16 +1176,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | |||
| 1134 | } | 1176 | } |
| 1135 | } | 1177 | } |
| 1136 | /* start the queue if we've stopped it */ | 1178 | /* start the queue if we've stopped it */ |
| 1137 | if (netif_queue_stopped(mgp->dev) | 1179 | if (netif_queue_stopped(ss->dev) |
| 1138 | && tx->req - tx->done < (tx->mask >> 1)) { | 1180 | && tx->req - tx->done < (tx->mask >> 1)) { |
| 1139 | mgp->wake_queue++; | 1181 | tx->wake_queue++; |
| 1140 | netif_wake_queue(mgp->dev); | 1182 | netif_wake_queue(ss->dev); |
| 1141 | } | 1183 | } |
| 1142 | } | 1184 | } |
| 1143 | 1185 | ||
| 1144 | static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) | 1186 | static inline int |
| 1187 | myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | ||
| 1145 | { | 1188 | { |
| 1146 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | 1189 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
| 1190 | struct myri10ge_priv *mgp = ss->mgp; | ||
| 1147 | unsigned long rx_bytes = 0; | 1191 | unsigned long rx_bytes = 0; |
| 1148 | unsigned long rx_packets = 0; | 1192 | unsigned long rx_packets = 0; |
| 1149 | unsigned long rx_ok; | 1193 | unsigned long rx_ok; |
| @@ -1159,40 +1203,40 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) | |||
| 1159 | rx_done->entry[idx].length = 0; | 1203 | rx_done->entry[idx].length = 0; |
| 1160 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1204 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
| 1161 | if (length <= mgp->small_bytes) | 1205 | if (length <= mgp->small_bytes) |
| 1162 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, | 1206 | rx_ok = myri10ge_rx_done(ss, &ss->rx_small, |
| 1163 | mgp->small_bytes, | 1207 | mgp->small_bytes, |
| 1164 | length, checksum); | 1208 | length, checksum); |
| 1165 | else | 1209 | else |
| 1166 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, | 1210 | rx_ok = myri10ge_rx_done(ss, &ss->rx_big, |
| 1167 | mgp->big_bytes, | 1211 | mgp->big_bytes, |
| 1168 | length, checksum); | 1212 | length, checksum); |
| 1169 | rx_packets += rx_ok; | 1213 | rx_packets += rx_ok; |
| 1170 | rx_bytes += rx_ok * (unsigned long)length; | 1214 | rx_bytes += rx_ok * (unsigned long)length; |
| 1171 | cnt++; | 1215 | cnt++; |
| 1172 | idx = cnt & (myri10ge_max_intr_slots - 1); | 1216 | idx = cnt & (mgp->max_intr_slots - 1); |
| 1173 | work_done++; | 1217 | work_done++; |
| 1174 | } | 1218 | } |
| 1175 | rx_done->idx = idx; | 1219 | rx_done->idx = idx; |
| 1176 | rx_done->cnt = cnt; | 1220 | rx_done->cnt = cnt; |
| 1177 | mgp->stats.rx_packets += rx_packets; | 1221 | ss->stats.rx_packets += rx_packets; |
| 1178 | mgp->stats.rx_bytes += rx_bytes; | 1222 | ss->stats.rx_bytes += rx_bytes; |
| 1179 | 1223 | ||
| 1180 | if (myri10ge_lro) | 1224 | if (myri10ge_lro) |
| 1181 | lro_flush_all(&rx_done->lro_mgr); | 1225 | lro_flush_all(&rx_done->lro_mgr); |
| 1182 | 1226 | ||
| 1183 | /* restock receive rings if needed */ | 1227 | /* restock receive rings if needed */ |
| 1184 | if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) | 1228 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) |
| 1185 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 1229 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, |
| 1186 | mgp->small_bytes + MXGEFW_PAD, 0); | 1230 | mgp->small_bytes + MXGEFW_PAD, 0); |
| 1187 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) | 1231 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) |
| 1188 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); | 1232 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); |
| 1189 | 1233 | ||
| 1190 | return work_done; | 1234 | return work_done; |
| 1191 | } | 1235 | } |
| 1192 | 1236 | ||
| 1193 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | 1237 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) |
| 1194 | { | 1238 | { |
| 1195 | struct mcp_irq_data *stats = mgp->fw_stats; | 1239 | struct mcp_irq_data *stats = mgp->ss.fw_stats; |
| 1196 | 1240 | ||
| 1197 | if (unlikely(stats->stats_updated)) { | 1241 | if (unlikely(stats->stats_updated)) { |
| 1198 | unsigned link_up = ntohl(stats->link_up); | 1242 | unsigned link_up = ntohl(stats->link_up); |
| @@ -1219,9 +1263,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |||
| 1219 | } | 1263 | } |
| 1220 | } | 1264 | } |
| 1221 | if (mgp->rdma_tags_available != | 1265 | if (mgp->rdma_tags_available != |
| 1222 | ntohl(mgp->fw_stats->rdma_tags_available)) { | 1266 | ntohl(stats->rdma_tags_available)) { |
| 1223 | mgp->rdma_tags_available = | 1267 | mgp->rdma_tags_available = |
| 1224 | ntohl(mgp->fw_stats->rdma_tags_available); | 1268 | ntohl(stats->rdma_tags_available); |
| 1225 | printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " | 1269 | printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " |
| 1226 | "%d tags left\n", mgp->dev->name, | 1270 | "%d tags left\n", mgp->dev->name, |
| 1227 | mgp->rdma_tags_available); | 1271 | mgp->rdma_tags_available); |
| @@ -1234,26 +1278,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | |||
| 1234 | 1278 | ||
| 1235 | static int myri10ge_poll(struct napi_struct *napi, int budget) | 1279 | static int myri10ge_poll(struct napi_struct *napi, int budget) |
| 1236 | { | 1280 | { |
| 1237 | struct myri10ge_priv *mgp = | 1281 | struct myri10ge_slice_state *ss = |
| 1238 | container_of(napi, struct myri10ge_priv, napi); | 1282 | container_of(napi, struct myri10ge_slice_state, napi); |
| 1239 | struct net_device *netdev = mgp->dev; | 1283 | struct net_device *netdev = ss->mgp->dev; |
| 1240 | int work_done; | 1284 | int work_done; |
| 1241 | 1285 | ||
| 1242 | /* process as many rx events as NAPI will allow */ | 1286 | /* process as many rx events as NAPI will allow */ |
| 1243 | work_done = myri10ge_clean_rx_done(mgp, budget); | 1287 | work_done = myri10ge_clean_rx_done(ss, budget); |
| 1244 | 1288 | ||
| 1245 | if (work_done < budget) { | 1289 | if (work_done < budget) { |
| 1246 | netif_rx_complete(netdev, napi); | 1290 | netif_rx_complete(netdev, napi); |
| 1247 | put_be32(htonl(3), mgp->irq_claim); | 1291 | put_be32(htonl(3), ss->irq_claim); |
| 1248 | } | 1292 | } |
| 1249 | return work_done; | 1293 | return work_done; |
| 1250 | } | 1294 | } |
| 1251 | 1295 | ||
| 1252 | static irqreturn_t myri10ge_intr(int irq, void *arg) | 1296 | static irqreturn_t myri10ge_intr(int irq, void *arg) |
| 1253 | { | 1297 | { |
| 1254 | struct myri10ge_priv *mgp = arg; | 1298 | struct myri10ge_slice_state *ss = arg; |
| 1255 | struct mcp_irq_data *stats = mgp->fw_stats; | 1299 | struct myri10ge_priv *mgp = ss->mgp; |
| 1256 | struct myri10ge_tx_buf *tx = &mgp->tx; | 1300 | struct mcp_irq_data *stats = ss->fw_stats; |
| 1301 | struct myri10ge_tx_buf *tx = &ss->tx; | ||
| 1257 | u32 send_done_count; | 1302 | u32 send_done_count; |
| 1258 | int i; | 1303 | int i; |
| 1259 | 1304 | ||
| @@ -1264,7 +1309,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
| 1264 | /* low bit indicates receives are present, so schedule | 1309 | /* low bit indicates receives are present, so schedule |
| 1265 | * napi poll handler */ | 1310 | * napi poll handler */ |
| 1266 | if (stats->valid & 1) | 1311 | if (stats->valid & 1) |
| 1267 | netif_rx_schedule(mgp->dev, &mgp->napi); | 1312 | netif_rx_schedule(ss->dev, &ss->napi); |
| 1268 | 1313 | ||
| 1269 | if (!mgp->msi_enabled) { | 1314 | if (!mgp->msi_enabled) { |
| 1270 | put_be32(0, mgp->irq_deassert); | 1315 | put_be32(0, mgp->irq_deassert); |
| @@ -1281,7 +1326,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
| 1281 | /* check for transmit completes and receives */ | 1326 | /* check for transmit completes and receives */ |
| 1282 | send_done_count = ntohl(stats->send_done_count); | 1327 | send_done_count = ntohl(stats->send_done_count); |
| 1283 | if (send_done_count != tx->pkt_done) | 1328 | if (send_done_count != tx->pkt_done) |
| 1284 | myri10ge_tx_done(mgp, (int)send_done_count); | 1329 | myri10ge_tx_done(ss, (int)send_done_count); |
| 1285 | if (unlikely(i > myri10ge_max_irq_loops)) { | 1330 | if (unlikely(i > myri10ge_max_irq_loops)) { |
| 1286 | printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", | 1331 | printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", |
| 1287 | mgp->dev->name); | 1332 | mgp->dev->name); |
| @@ -1296,16 +1341,46 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) | |||
| 1296 | 1341 | ||
| 1297 | myri10ge_check_statblock(mgp); | 1342 | myri10ge_check_statblock(mgp); |
| 1298 | 1343 | ||
| 1299 | put_be32(htonl(3), mgp->irq_claim + 1); | 1344 | put_be32(htonl(3), ss->irq_claim + 1); |
| 1300 | return (IRQ_HANDLED); | 1345 | return (IRQ_HANDLED); |
| 1301 | } | 1346 | } |
| 1302 | 1347 | ||
| 1303 | static int | 1348 | static int |
| 1304 | myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | 1349 | myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) |
| 1305 | { | 1350 | { |
| 1351 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
| 1352 | char *ptr; | ||
| 1353 | int i; | ||
| 1354 | |||
| 1306 | cmd->autoneg = AUTONEG_DISABLE; | 1355 | cmd->autoneg = AUTONEG_DISABLE; |
| 1307 | cmd->speed = SPEED_10000; | 1356 | cmd->speed = SPEED_10000; |
| 1308 | cmd->duplex = DUPLEX_FULL; | 1357 | cmd->duplex = DUPLEX_FULL; |
| 1358 | |||
| 1359 | /* | ||
| 1360 | * parse the product code to determine the interface type | ||
| 1361 | * (CX4, XFP, Quad Ribbon Fiber) by looking at the character | ||
| 1362 | * after the 3rd dash in the driver's cached copy of the | ||
| 1363 | * EEPROM's product code string. | ||
| 1364 | */ | ||
| 1365 | ptr = mgp->product_code_string; | ||
| 1366 | if (ptr == NULL) { | ||
| 1367 | printk(KERN_ERR "myri10ge: %s: Missing product code\n", | ||
| 1368 | netdev->name); | ||
| 1369 | return 0; | ||
| 1370 | } | ||
| 1371 | for (i = 0; i < 3; i++, ptr++) { | ||
| 1372 | ptr = strchr(ptr, '-'); | ||
| 1373 | if (ptr == NULL) { | ||
| 1374 | printk(KERN_ERR "myri10ge: %s: Invalid product " | ||
| 1375 | "code %s\n", netdev->name, | ||
| 1376 | mgp->product_code_string); | ||
| 1377 | return 0; | ||
| 1378 | } | ||
| 1379 | } | ||
| 1380 | if (*ptr == 'R' || *ptr == 'Q') { | ||
| 1381 | /* We've found either an XFP or quad ribbon fiber */ | ||
| 1382 | cmd->port = PORT_FIBRE; | ||
| 1383 | } | ||
| 1309 | return 0; | 1384 | return 0; |
| 1310 | } | 1385 | } |
| 1311 | 1386 | ||
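With the parsing added to `myri10ge_get_settings()` above, the reported port type comes from the EEPROM product code: skip three `-` separators, then classify on the next character, where `R` or `Q` means XFP or quad ribbon fiber and hence `PORT_FIBRE`. A self-contained sketch of the same walk; the product code string here is made up for illustration:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char code[] = "10G-PCIE-8A-R";  /* hypothetical product code */
	char *ptr = code;
	int i;

	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');  /* advance past the i-th dash */
		if (ptr == NULL) {
			fprintf(stderr, "invalid product code\n");
			return 1;
		}
	}
	printf("type char '%c' -> %s\n", *ptr,
	       (*ptr == 'R' || *ptr == 'Q') ? "fibre" : "copper/other");
	return 0;
}
```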
| @@ -1324,6 +1399,7 @@ static int | |||
| 1324 | myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) | 1399 | myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) |
| 1325 | { | 1400 | { |
| 1326 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1401 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
| 1402 | |||
| 1327 | coal->rx_coalesce_usecs = mgp->intr_coal_delay; | 1403 | coal->rx_coalesce_usecs = mgp->intr_coal_delay; |
| 1328 | return 0; | 1404 | return 0; |
| 1329 | } | 1405 | } |
| @@ -1370,10 +1446,10 @@ myri10ge_get_ringparam(struct net_device *netdev, | |||
| 1370 | { | 1446 | { |
| 1371 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1447 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
| 1372 | 1448 | ||
| 1373 | ring->rx_mini_max_pending = mgp->rx_small.mask + 1; | 1449 | ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; |
| 1374 | ring->rx_max_pending = mgp->rx_big.mask + 1; | 1450 | ring->rx_max_pending = mgp->ss.rx_big.mask + 1; |
| 1375 | ring->rx_jumbo_max_pending = 0; | 1451 | ring->rx_jumbo_max_pending = 0; |
| 1376 | ring->tx_max_pending = mgp->rx_small.mask + 1; | 1452 | ring->tx_max_pending = mgp->ss.rx_small.mask + 1; |
| 1377 | ring->rx_mini_pending = ring->rx_mini_max_pending; | 1453 | ring->rx_mini_pending = ring->rx_mini_max_pending; |
| 1378 | ring->rx_pending = ring->rx_max_pending; | 1454 | ring->rx_pending = ring->rx_max_pending; |
| 1379 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; | 1455 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; |
| @@ -1383,6 +1459,7 @@ myri10ge_get_ringparam(struct net_device *netdev, | |||
| 1383 | static u32 myri10ge_get_rx_csum(struct net_device *netdev) | 1459 | static u32 myri10ge_get_rx_csum(struct net_device *netdev) |
| 1384 | { | 1460 | { |
| 1385 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1461 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
| 1462 | |||
| 1386 | if (mgp->csum_flag) | 1463 | if (mgp->csum_flag) |
| 1387 | return 1; | 1464 | return 1; |
| 1388 | else | 1465 | else |
| @@ -1392,6 +1469,7 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev) | |||
| 1392 | static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) | 1469 | static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) |
| 1393 | { | 1470 | { |
| 1394 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1471 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
| 1472 | |||
| 1395 | if (csum_enabled) | 1473 | if (csum_enabled) |
| 1396 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | 1474 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; |
| 1397 | else | 1475 | else |
| @@ -1411,7 +1489,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) | |||
| 1411 | return 0; | 1489 | return 0; |
| 1412 | } | 1490 | } |
| 1413 | 1491 | ||
| 1414 | static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | 1492 | static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { |
| 1415 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", | 1493 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", |
| 1416 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | 1494 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", |
| 1417 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | 1495 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", |
| @@ -1421,28 +1499,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | |||
| 1421 | /* device-specific stats */ | 1499 | /* device-specific stats */ |
| 1422 | "tx_boundary", "WC", "irq", "MSI", | 1500 | "tx_boundary", "WC", "irq", "MSI", |
| 1423 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | 1501 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
| 1424 | "serial_number", "tx_pkt_start", "tx_pkt_done", | 1502 | "serial_number", "watchdog_resets", |
| 1425 | "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", | ||
| 1426 | "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", | ||
| 1427 | "link_changes", "link_up", "dropped_link_overflow", | 1503 | "link_changes", "link_up", "dropped_link_overflow", |
| 1428 | "dropped_link_error_or_filtered", | 1504 | "dropped_link_error_or_filtered", |
| 1429 | "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", | 1505 | "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", |
| 1430 | "dropped_unicast_filtered", "dropped_multicast_filtered", | 1506 | "dropped_unicast_filtered", "dropped_multicast_filtered", |
| 1431 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", | 1507 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", |
| 1432 | "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", | 1508 | "dropped_no_big_buffer" |
| 1509 | }; | ||
| 1510 | |||
| 1511 | static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { | ||
| 1512 | "----------- slice ---------", | ||
| 1513 | "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", | ||
| 1514 | "rx_small_cnt", "rx_big_cnt", | ||
| 1515 | "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated", | ||
| 1516 | "LRO flushed", | ||
| 1433 | "LRO avg aggr", "LRO no_desc" | 1517 | "LRO avg aggr", "LRO no_desc" |
| 1434 | }; | 1518 | }; |
| 1435 | 1519 | ||
| 1436 | #define MYRI10GE_NET_STATS_LEN 21 | 1520 | #define MYRI10GE_NET_STATS_LEN 21 |
| 1437 | #define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) | 1521 | #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) |
| 1522 | #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) | ||
| 1438 | 1523 | ||
| 1439 | static void | 1524 | static void |
| 1440 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) | 1525 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) |
| 1441 | { | 1526 | { |
| 1442 | switch (stringset) { | 1527 | switch (stringset) { |
| 1443 | case ETH_SS_STATS: | 1528 | case ETH_SS_STATS: |
| 1444 | memcpy(data, *myri10ge_gstrings_stats, | 1529 | memcpy(data, *myri10ge_gstrings_main_stats, |
| 1445 | sizeof(myri10ge_gstrings_stats)); | 1530 | sizeof(myri10ge_gstrings_main_stats)); |
| 1531 | data += sizeof(myri10ge_gstrings_main_stats); | ||
| 1532 | memcpy(data, *myri10ge_gstrings_slice_stats, | ||
| 1533 | sizeof(myri10ge_gstrings_slice_stats)); | ||
| 1534 | data += sizeof(myri10ge_gstrings_slice_stats); | ||
| 1446 | break; | 1535 | break; |
| 1447 | } | 1536 | } |
| 1448 | } | 1537 | } |
| @@ -1451,7 +1540,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset) | |||
| 1451 | { | 1540 | { |
| 1452 | switch (sset) { | 1541 | switch (sset) { |
| 1453 | case ETH_SS_STATS: | 1542 | case ETH_SS_STATS: |
| 1454 | return MYRI10GE_STATS_LEN; | 1543 | return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; |
| 1455 | default: | 1544 | default: |
| 1456 | return -EOPNOTSUPP; | 1545 | return -EOPNOTSUPP; |
| 1457 | } | 1546 | } |
| @@ -1462,12 +1551,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
| 1462 | struct ethtool_stats *stats, u64 * data) | 1551 | struct ethtool_stats *stats, u64 * data) |
| 1463 | { | 1552 | { |
| 1464 | struct myri10ge_priv *mgp = netdev_priv(netdev); | 1553 | struct myri10ge_priv *mgp = netdev_priv(netdev); |
| 1554 | struct myri10ge_slice_state *ss; | ||
| 1465 | int i; | 1555 | int i; |
| 1466 | 1556 | ||
| 1467 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) | 1557 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) |
| 1468 | data[i] = ((unsigned long *)&mgp->stats)[i]; | 1558 | data[i] = ((unsigned long *)&mgp->stats)[i]; |
| 1469 | 1559 | ||
| 1470 | data[i++] = (unsigned int)mgp->tx.boundary; | 1560 | data[i++] = (unsigned int)mgp->tx_boundary; |
| 1471 | data[i++] = (unsigned int)mgp->wc_enabled; | 1561 | data[i++] = (unsigned int)mgp->wc_enabled; |
| 1472 | data[i++] = (unsigned int)mgp->pdev->irq; | 1562 | data[i++] = (unsigned int)mgp->pdev->irq; |
| 1473 | data[i++] = (unsigned int)mgp->msi_enabled; | 1563 | data[i++] = (unsigned int)mgp->msi_enabled; |
| @@ -1475,40 +1565,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
| 1475 | data[i++] = (unsigned int)mgp->write_dma; | 1565 | data[i++] = (unsigned int)mgp->write_dma; |
| 1476 | data[i++] = (unsigned int)mgp->read_write_dma; | 1566 | data[i++] = (unsigned int)mgp->read_write_dma; |
| 1477 | data[i++] = (unsigned int)mgp->serial_number; | 1567 | data[i++] = (unsigned int)mgp->serial_number; |
| 1478 | data[i++] = (unsigned int)mgp->tx.pkt_start; | ||
| 1479 | data[i++] = (unsigned int)mgp->tx.pkt_done; | ||
| 1480 | data[i++] = (unsigned int)mgp->tx.req; | ||
| 1481 | data[i++] = (unsigned int)mgp->tx.done; | ||
| 1482 | data[i++] = (unsigned int)mgp->rx_small.cnt; | ||
| 1483 | data[i++] = (unsigned int)mgp->rx_big.cnt; | ||
| 1484 | data[i++] = (unsigned int)mgp->wake_queue; | ||
| 1485 | data[i++] = (unsigned int)mgp->stop_queue; | ||
| 1486 | data[i++] = (unsigned int)mgp->watchdog_resets; | 1568 | data[i++] = (unsigned int)mgp->watchdog_resets; |
| 1487 | data[i++] = (unsigned int)mgp->tx_linearized; | ||
| 1488 | data[i++] = (unsigned int)mgp->link_changes; | 1569 | data[i++] = (unsigned int)mgp->link_changes; |
| 1489 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); | 1570 | |
| 1490 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); | 1571 | /* firmware stats are useful only in the first slice */ |
| 1491 | data[i++] = | 1572 | ss = &mgp->ss; |
| 1492 | (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); | 1573 | data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); |
| 1493 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); | 1574 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); |
| 1494 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy); | ||
| 1495 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32); | ||
| 1496 | data[i++] = | 1575 | data[i++] = |
| 1497 | (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); | 1576 | (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); |
| 1577 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); | ||
| 1578 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); | ||
| 1579 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); | ||
| 1580 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); | ||
| 1498 | data[i++] = | 1581 | data[i++] = |
| 1499 | (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); | 1582 | (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); |
| 1500 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); | 1583 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); |
| 1501 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); | 1584 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); |
| 1502 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); | 1585 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); |
| 1503 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); | 1586 | data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); |
| 1504 | data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; | 1587 | |
| 1505 | data[i++] = mgp->rx_done.lro_mgr.stats.flushed; | 1588 | data[i++] = 0; |
| 1506 | if (mgp->rx_done.lro_mgr.stats.flushed) | 1589 | data[i++] = (unsigned int)ss->tx.pkt_start; |
| 1507 | data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / | 1590 | data[i++] = (unsigned int)ss->tx.pkt_done; |
| 1508 | mgp->rx_done.lro_mgr.stats.flushed; | 1591 | data[i++] = (unsigned int)ss->tx.req; |
| 1592 | data[i++] = (unsigned int)ss->tx.done; | ||
| 1593 | data[i++] = (unsigned int)ss->rx_small.cnt; | ||
| 1594 | data[i++] = (unsigned int)ss->rx_big.cnt; | ||
| 1595 | data[i++] = (unsigned int)ss->tx.wake_queue; | ||
| 1596 | data[i++] = (unsigned int)ss->tx.stop_queue; | ||
| 1597 | data[i++] = (unsigned int)ss->tx.linearized; | ||
| 1598 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated; | ||
| 1599 | data[i++] = ss->rx_done.lro_mgr.stats.flushed; | ||
| 1600 | if (ss->rx_done.lro_mgr.stats.flushed) | ||
| 1601 | data[i++] = ss->rx_done.lro_mgr.stats.aggregated / | ||
| 1602 | ss->rx_done.lro_mgr.stats.flushed; | ||
| 1509 | else | 1603 | else |
| 1510 | data[i++] = 0; | 1604 | data[i++] = 0; |
| 1511 | data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; | 1605 | data[i++] = ss->rx_done.lro_mgr.stats.no_desc; |
| 1512 | } | 1606 | } |
| 1513 | 1607 | ||
| 1514 | static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) | 1608 | static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) |
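The ethtool rework above splits the statistics into a main (per-device) array and a per-slice array. Three pieces must stay in sync: `myri10ge_get_strings()` emits both label arrays back to back, `myri10ge_get_sset_count()` returns the combined length, and `myri10ge_get_ethtool_stats()` fills `data[]` in exactly the same order, which is why the visual separator string `"----------- slice ---------"` is paired with a `data[i++] = 0;` slot. A small stand-in demonstrating the parallel-array invariant, with made-up counter values:

```c
#include <stdio.h>
#include <stdint.h>

static const char *labels[] = {
	"watchdog_resets", "----------- slice ---------", "tx_pkt_start",
};

int main(void)
{
	uint64_t data[3];
	int i = 0;

	data[i++] = 4;          /* watchdog_resets */
	data[i++] = 0;          /* separator still consumes one slot */
	data[i++] = 12345;      /* tx_pkt_start */

	for (i = 0; i < 3; i++)
		printf("%-28s %llu\n", labels[i],
		       (unsigned long long)data[i]);
	return 0;
}
```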
| @@ -1544,19 +1638,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { | |||
| 1544 | .get_msglevel = myri10ge_get_msglevel | 1638 | .get_msglevel = myri10ge_get_msglevel |
| 1545 | }; | 1639 | }; |
| 1546 | 1640 | ||
| 1547 | static int myri10ge_allocate_rings(struct net_device *dev) | 1641 | static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) |
| 1548 | { | 1642 | { |
| 1549 | struct myri10ge_priv *mgp; | 1643 | struct myri10ge_priv *mgp = ss->mgp; |
| 1550 | struct myri10ge_cmd cmd; | 1644 | struct myri10ge_cmd cmd; |
| 1645 | struct net_device *dev = mgp->dev; | ||
| 1551 | int tx_ring_size, rx_ring_size; | 1646 | int tx_ring_size, rx_ring_size; |
| 1552 | int tx_ring_entries, rx_ring_entries; | 1647 | int tx_ring_entries, rx_ring_entries; |
| 1553 | int i, status; | 1648 | int i, status; |
| 1554 | size_t bytes; | 1649 | size_t bytes; |
| 1555 | 1650 | ||
| 1556 | mgp = netdev_priv(dev); | ||
| 1557 | |||
| 1558 | /* get ring sizes */ | 1651 | /* get ring sizes */ |
| 1559 | |||
| 1560 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); | 1652 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); |
| 1561 | tx_ring_size = cmd.data0; | 1653 | tx_ring_size = cmd.data0; |
| 1562 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); | 1654 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); |
| @@ -1566,144 +1658,142 @@ static int myri10ge_allocate_rings(struct net_device *dev) | |||
| 1566 | 1658 | ||
| 1567 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); | 1659 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); |
| 1568 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); | 1660 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); |
| 1569 | mgp->tx.mask = tx_ring_entries - 1; | 1661 | ss->tx.mask = tx_ring_entries - 1; |
| 1570 | mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; | 1662 | ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; |
| 1571 | 1663 | ||
| 1572 | status = -ENOMEM; | 1664 | status = -ENOMEM; |
| 1573 | 1665 | ||
| 1574 | /* allocate the host shadow rings */ | 1666 | /* allocate the host shadow rings */ |
| 1575 | 1667 | ||
| 1576 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) | 1668 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) |
| 1577 | * sizeof(*mgp->tx.req_list); | 1669 | * sizeof(*ss->tx.req_list); |
| 1578 | mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); | 1670 | ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); |
| 1579 | if (mgp->tx.req_bytes == NULL) | 1671 | if (ss->tx.req_bytes == NULL) |
| 1580 | goto abort_with_nothing; | 1672 | goto abort_with_nothing; |
| 1581 | 1673 | ||
| 1582 | /* ensure req_list entries are aligned to 8 bytes */ | 1674 | /* ensure req_list entries are aligned to 8 bytes */ |
| 1583 | mgp->tx.req_list = (struct mcp_kreq_ether_send *) | 1675 | ss->tx.req_list = (struct mcp_kreq_ether_send *) |
| 1584 | ALIGN((unsigned long)mgp->tx.req_bytes, 8); | 1676 | ALIGN((unsigned long)ss->tx.req_bytes, 8); |
| 1585 | 1677 | ||
| 1586 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); | 1678 | bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); |
| 1587 | mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); | 1679 | ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); |
| 1588 | if (mgp->rx_small.shadow == NULL) | 1680 | if (ss->rx_small.shadow == NULL) |
| 1589 | goto abort_with_tx_req_bytes; | 1681 | goto abort_with_tx_req_bytes; |
| 1590 | 1682 | ||
| 1591 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); | 1683 | bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); |
| 1592 | mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); | 1684 | ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); |
| 1593 | if (mgp->rx_big.shadow == NULL) | 1685 | if (ss->rx_big.shadow == NULL) |
| 1594 | goto abort_with_rx_small_shadow; | 1686 | goto abort_with_rx_small_shadow; |
| 1595 | 1687 | ||
| 1596 | /* allocate the host info rings */ | 1688 | /* allocate the host info rings */ |
| 1597 | 1689 | ||
| 1598 | bytes = tx_ring_entries * sizeof(*mgp->tx.info); | 1690 | bytes = tx_ring_entries * sizeof(*ss->tx.info); |
| 1599 | mgp->tx.info = kzalloc(bytes, GFP_KERNEL); | 1691 | ss->tx.info = kzalloc(bytes, GFP_KERNEL); |
| 1600 | if (mgp->tx.info == NULL) | 1692 | if (ss->tx.info == NULL) |
| 1601 | goto abort_with_rx_big_shadow; | 1693 | goto abort_with_rx_big_shadow; |
| 1602 | 1694 | ||
| 1603 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); | 1695 | bytes = rx_ring_entries * sizeof(*ss->rx_small.info); |
| 1604 | mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); | 1696 | ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); |
| 1605 | if (mgp->rx_small.info == NULL) | 1697 | if (ss->rx_small.info == NULL) |
| 1606 | goto abort_with_tx_info; | 1698 | goto abort_with_tx_info; |
| 1607 | 1699 | ||
| 1608 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); | 1700 | bytes = rx_ring_entries * sizeof(*ss->rx_big.info); |
| 1609 | mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); | 1701 | ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); |
| 1610 | if (mgp->rx_big.info == NULL) | 1702 | if (ss->rx_big.info == NULL) |
| 1611 | goto abort_with_rx_small_info; | 1703 | goto abort_with_rx_small_info; |
| 1612 | 1704 | ||
| 1613 | /* Fill the receive rings */ | 1705 | /* Fill the receive rings */ |
| 1614 | mgp->rx_big.cnt = 0; | 1706 | ss->rx_big.cnt = 0; |
| 1615 | mgp->rx_small.cnt = 0; | 1707 | ss->rx_small.cnt = 0; |
| 1616 | mgp->rx_big.fill_cnt = 0; | 1708 | ss->rx_big.fill_cnt = 0; |
| 1617 | mgp->rx_small.fill_cnt = 0; | 1709 | ss->rx_small.fill_cnt = 0; |
| 1618 | mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; | 1710 | ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; |
| 1619 | mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; | 1711 | ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; |
| 1620 | mgp->rx_small.watchdog_needed = 0; | 1712 | ss->rx_small.watchdog_needed = 0; |
| 1621 | mgp->rx_big.watchdog_needed = 0; | 1713 | ss->rx_big.watchdog_needed = 0; |
| 1622 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 1714 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, |
| 1623 | mgp->small_bytes + MXGEFW_PAD, 0); | 1715 | mgp->small_bytes + MXGEFW_PAD, 0); |
| 1624 | 1716 | ||
| 1625 | if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { | 1717 | if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { |
| 1626 | printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", | 1718 | printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", |
| 1627 | dev->name, mgp->rx_small.fill_cnt); | 1719 | dev->name, ss->rx_small.fill_cnt); |
| 1628 | goto abort_with_rx_small_ring; | 1720 | goto abort_with_rx_small_ring; |
| 1629 | } | 1721 | } |
| 1630 | 1722 | ||
| 1631 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); | 1723 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); |
| 1632 | if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { | 1724 | if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { |
| 1633 | printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", | 1725 | printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", |
| 1634 | dev->name, mgp->rx_big.fill_cnt); | 1726 | dev->name, ss->rx_big.fill_cnt); |
| 1635 | goto abort_with_rx_big_ring; | 1727 | goto abort_with_rx_big_ring; |
| 1636 | } | 1728 | } |
| 1637 | 1729 | ||
| 1638 | return 0; | 1730 | return 0; |
| 1639 | 1731 | ||
| 1640 | abort_with_rx_big_ring: | 1732 | abort_with_rx_big_ring: |
| 1641 | for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { | 1733 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
| 1642 | int idx = i & mgp->rx_big.mask; | 1734 | int idx = i & ss->rx_big.mask; |
| 1643 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], | 1735 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], |
| 1644 | mgp->big_bytes); | 1736 | mgp->big_bytes); |
| 1645 | put_page(mgp->rx_big.info[idx].page); | 1737 | put_page(ss->rx_big.info[idx].page); |
| 1646 | } | 1738 | } |
| 1647 | 1739 | ||
| 1648 | abort_with_rx_small_ring: | 1740 | abort_with_rx_small_ring: |
| 1649 | for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { | 1741 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
| 1650 | int idx = i & mgp->rx_small.mask; | 1742 | int idx = i & ss->rx_small.mask; |
| 1651 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], | 1743 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], |
| 1652 | mgp->small_bytes + MXGEFW_PAD); | 1744 | mgp->small_bytes + MXGEFW_PAD); |
| 1653 | put_page(mgp->rx_small.info[idx].page); | 1745 | put_page(ss->rx_small.info[idx].page); |
| 1654 | } | 1746 | } |
| 1655 | 1747 | ||
| 1656 | kfree(mgp->rx_big.info); | 1748 | kfree(ss->rx_big.info); |
| 1657 | 1749 | ||
| 1658 | abort_with_rx_small_info: | 1750 | abort_with_rx_small_info: |
| 1659 | kfree(mgp->rx_small.info); | 1751 | kfree(ss->rx_small.info); |
| 1660 | 1752 | ||
| 1661 | abort_with_tx_info: | 1753 | abort_with_tx_info: |
| 1662 | kfree(mgp->tx.info); | 1754 | kfree(ss->tx.info); |
| 1663 | 1755 | ||
| 1664 | abort_with_rx_big_shadow: | 1756 | abort_with_rx_big_shadow: |
| 1665 | kfree(mgp->rx_big.shadow); | 1757 | kfree(ss->rx_big.shadow); |
| 1666 | 1758 | ||
| 1667 | abort_with_rx_small_shadow: | 1759 | abort_with_rx_small_shadow: |
| 1668 | kfree(mgp->rx_small.shadow); | 1760 | kfree(ss->rx_small.shadow); |
| 1669 | 1761 | ||
| 1670 | abort_with_tx_req_bytes: | 1762 | abort_with_tx_req_bytes: |
| 1671 | kfree(mgp->tx.req_bytes); | 1763 | kfree(ss->tx.req_bytes); |
| 1672 | mgp->tx.req_bytes = NULL; | 1764 | ss->tx.req_bytes = NULL; |
| 1673 | mgp->tx.req_list = NULL; | 1765 | ss->tx.req_list = NULL; |
| 1674 | 1766 | ||
| 1675 | abort_with_nothing: | 1767 | abort_with_nothing: |
| 1676 | return status; | 1768 | return status; |
| 1677 | } | 1769 | } |
| 1678 | 1770 | ||
| 1679 | static void myri10ge_free_rings(struct net_device *dev) | 1771 | static void myri10ge_free_rings(struct myri10ge_slice_state *ss) |
| 1680 | { | 1772 | { |
| 1681 | struct myri10ge_priv *mgp; | 1773 | struct myri10ge_priv *mgp = ss->mgp; |
| 1682 | struct sk_buff *skb; | 1774 | struct sk_buff *skb; |
| 1683 | struct myri10ge_tx_buf *tx; | 1775 | struct myri10ge_tx_buf *tx; |
| 1684 | int i, len, idx; | 1776 | int i, len, idx; |
| 1685 | 1777 | ||
| 1686 | mgp = netdev_priv(dev); | 1778 | for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { |
| 1687 | 1779 | idx = i & ss->rx_big.mask; | |
| 1688 | for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { | 1780 | if (i == ss->rx_big.fill_cnt - 1) |
| 1689 | idx = i & mgp->rx_big.mask; | 1781 | ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; |
| 1690 | if (i == mgp->rx_big.fill_cnt - 1) | 1782 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], |
| 1691 | mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; | ||
| 1692 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], | ||
| 1693 | mgp->big_bytes); | 1783 | mgp->big_bytes); |
| 1694 | put_page(mgp->rx_big.info[idx].page); | 1784 | put_page(ss->rx_big.info[idx].page); |
| 1695 | } | 1785 | } |
| 1696 | 1786 | ||
| 1697 | for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { | 1787 | for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { |
| 1698 | idx = i & mgp->rx_small.mask; | 1788 | idx = i & ss->rx_small.mask; |
| 1699 | if (i == mgp->rx_small.fill_cnt - 1) | 1789 | if (i == ss->rx_small.fill_cnt - 1) |
| 1700 | mgp->rx_small.info[idx].page_offset = | 1790 | ss->rx_small.info[idx].page_offset = |
| 1701 | MYRI10GE_ALLOC_SIZE; | 1791 | MYRI10GE_ALLOC_SIZE; |
| 1702 | myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], | 1792 | myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], |
| 1703 | mgp->small_bytes + MXGEFW_PAD); | 1793 | mgp->small_bytes + MXGEFW_PAD); |
| 1704 | put_page(mgp->rx_small.info[idx].page); | 1794 | put_page(ss->rx_small.info[idx].page); |
| 1705 | } | 1795 | } |
| 1706 | tx = &mgp->tx; | 1796 | tx = &ss->tx; |
| 1707 | while (tx->done != tx->req) { | 1797 | while (tx->done != tx->req) { |
| 1708 | idx = tx->done & tx->mask; | 1798 | idx = tx->done & tx->mask; |
| 1709 | skb = tx->info[idx].skb; | 1799 | skb = tx->info[idx].skb; |
| @@ -1714,7 +1804,7 @@ static void myri10ge_free_rings(struct net_device *dev) | |||
| 1714 | len = pci_unmap_len(&tx->info[idx], len); | 1804 | len = pci_unmap_len(&tx->info[idx], len); |
| 1715 | pci_unmap_len_set(&tx->info[idx], len, 0); | 1805 | pci_unmap_len_set(&tx->info[idx], len, 0); |
| 1716 | if (skb) { | 1806 | if (skb) { |
| 1717 | mgp->stats.tx_dropped++; | 1807 | ss->stats.tx_dropped++; |
| 1718 | dev_kfree_skb_any(skb); | 1808 | dev_kfree_skb_any(skb); |
| 1719 | if (len) | 1809 | if (len) |
| 1720 | pci_unmap_single(mgp->pdev, | 1810 | pci_unmap_single(mgp->pdev, |
| @@ -1729,19 +1819,19 @@ static void myri10ge_free_rings(struct net_device *dev) | |||
| 1729 | PCI_DMA_TODEVICE); | 1819 | PCI_DMA_TODEVICE); |
| 1730 | } | 1820 | } |
| 1731 | } | 1821 | } |
| 1732 | kfree(mgp->rx_big.info); | 1822 | kfree(ss->rx_big.info); |
| 1733 | 1823 | ||
| 1734 | kfree(mgp->rx_small.info); | 1824 | kfree(ss->rx_small.info); |
| 1735 | 1825 | ||
| 1736 | kfree(mgp->tx.info); | 1826 | kfree(ss->tx.info); |
| 1737 | 1827 | ||
| 1738 | kfree(mgp->rx_big.shadow); | 1828 | kfree(ss->rx_big.shadow); |
| 1739 | 1829 | ||
| 1740 | kfree(mgp->rx_small.shadow); | 1830 | kfree(ss->rx_small.shadow); |
| 1741 | 1831 | ||
| 1742 | kfree(mgp->tx.req_bytes); | 1832 | kfree(ss->tx.req_bytes); |
| 1743 | mgp->tx.req_bytes = NULL; | 1833 | ss->tx.req_bytes = NULL; |
| 1744 | mgp->tx.req_list = NULL; | 1834 | ss->tx.req_list = NULL; |
| 1745 | } | 1835 | } |
| 1746 | 1836 | ||
| 1747 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) | 1837 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) |
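The hunks above convert ring setup and teardown from `mgp->...` to a per-slice `ss->...` layout, with `myri10ge_allocate_rings()` and `myri10ge_free_rings()` now operating on a `struct myri10ge_slice_state`. A minimal sketch of what such a struct plausibly groups, inferred only from the fields these hunks touch; member types, names not shown in the diff, and ordering are assumptions, not the driver's actual definition:

```c
/* Hypothetical sketch of the per-slice grouping implied by the hunks
 * above; the real definition lives in the driver header and may differ. */
struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;		/* transmit ring */
	struct myri10ge_rx_buf rx_small;	/* small receive buffers (assumed type name) */
	struct myri10ge_rx_buf rx_big;		/* big receive buffers */
	struct myri10ge_rx_done rx_done;	/* completion ring + LRO state (assumed type name) */
	struct napi_struct napi;
	struct myri10ge_priv *mgp;		/* back-pointer used by myri10ge_free_rings() */
	struct net_device_stats stats;		/* per-slice tx_dropped etc. */
	struct mcp_irq_data *fw_stats;		/* DMA-visible firmware stats block */
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;			/* snapshots taken by the watchdog */
	int watchdog_tx_req;
};
```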
| @@ -1840,13 +1930,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | |||
| 1840 | 1930 | ||
| 1841 | static int myri10ge_open(struct net_device *dev) | 1931 | static int myri10ge_open(struct net_device *dev) |
| 1842 | { | 1932 | { |
| 1843 | struct myri10ge_priv *mgp; | 1933 | struct myri10ge_priv *mgp = netdev_priv(dev); |
| 1844 | struct myri10ge_cmd cmd; | 1934 | struct myri10ge_cmd cmd; |
| 1845 | struct net_lro_mgr *lro_mgr; | 1935 | struct net_lro_mgr *lro_mgr; |
| 1846 | int status, big_pow2; | 1936 | int status, big_pow2; |
| 1847 | 1937 | ||
| 1848 | mgp = netdev_priv(dev); | ||
| 1849 | |||
| 1850 | if (mgp->running != MYRI10GE_ETH_STOPPED) | 1938 | if (mgp->running != MYRI10GE_ETH_STOPPED) |
| 1851 | return -EBUSY; | 1939 | return -EBUSY; |
| 1852 | 1940 | ||
| @@ -1883,16 +1971,16 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1883 | /* get the lanai pointers to the send and receive rings */ | 1971 | /* get the lanai pointers to the send and receive rings */ |
| 1884 | 1972 | ||
| 1885 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); | 1973 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); |
| 1886 | mgp->tx.lanai = | 1974 | mgp->ss.tx.lanai = |
| 1887 | (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); | 1975 | (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); |
| 1888 | 1976 | ||
| 1889 | status |= | 1977 | status |= |
| 1890 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); | 1978 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); |
| 1891 | mgp->rx_small.lanai = | 1979 | mgp->ss.rx_small.lanai = |
| 1892 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | 1980 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); |
| 1893 | 1981 | ||
| 1894 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); | 1982 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); |
| 1895 | mgp->rx_big.lanai = | 1983 | mgp->ss.rx_big.lanai = |
| 1896 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | 1984 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); |
| 1897 | 1985 | ||
| 1898 | if (status != 0) { | 1986 | if (status != 0) { |
| @@ -1904,15 +1992,15 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1904 | } | 1992 | } |
| 1905 | 1993 | ||
| 1906 | if (myri10ge_wcfifo && mgp->wc_enabled) { | 1994 | if (myri10ge_wcfifo && mgp->wc_enabled) { |
| 1907 | mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; | 1995 | mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; |
| 1908 | mgp->rx_small.wc_fifo = | 1996 | mgp->ss.rx_small.wc_fifo = |
| 1909 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; | 1997 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; |
| 1910 | mgp->rx_big.wc_fifo = | 1998 | mgp->ss.rx_big.wc_fifo = |
| 1911 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; | 1999 | (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; |
| 1912 | } else { | 2000 | } else { |
| 1913 | mgp->tx.wc_fifo = NULL; | 2001 | mgp->ss.tx.wc_fifo = NULL; |
| 1914 | mgp->rx_small.wc_fifo = NULL; | 2002 | mgp->ss.rx_small.wc_fifo = NULL; |
| 1915 | mgp->rx_big.wc_fifo = NULL; | 2003 | mgp->ss.rx_big.wc_fifo = NULL; |
| 1916 | } | 2004 | } |
| 1917 | 2005 | ||
| 1918 | /* Firmware needs the big buff size as a power of 2. Lie and | 2006 | /* Firmware needs the big buff size as a power of 2. Lie and |
| @@ -1929,7 +2017,7 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1929 | mgp->big_bytes = big_pow2; | 2017 | mgp->big_bytes = big_pow2; |
| 1930 | } | 2018 | } |
| 1931 | 2019 | ||
| 1932 | status = myri10ge_allocate_rings(dev); | 2020 | status = myri10ge_allocate_rings(&mgp->ss); |
| 1933 | if (status != 0) | 2021 | if (status != 0) |
| 1934 | goto abort_with_irq; | 2022 | goto abort_with_irq; |
| 1935 | 2023 | ||
| @@ -1948,12 +2036,12 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1948 | goto abort_with_rings; | 2036 | goto abort_with_rings; |
| 1949 | } | 2037 | } |
| 1950 | 2038 | ||
| 1951 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); | 2039 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); |
| 1952 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); | 2040 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); |
| 1953 | cmd.data2 = sizeof(struct mcp_irq_data); | 2041 | cmd.data2 = sizeof(struct mcp_irq_data); |
| 1954 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); | 2042 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); |
| 1955 | if (status == -ENOSYS) { | 2043 | if (status == -ENOSYS) { |
| 1956 | dma_addr_t bus = mgp->fw_stats_bus; | 2044 | dma_addr_t bus = mgp->ss.fw_stats_bus; |
| 1957 | bus += offsetof(struct mcp_irq_data, send_done_count); | 2045 | bus += offsetof(struct mcp_irq_data, send_done_count); |
| 1958 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); | 2046 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); |
| 1959 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); | 2047 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); |
| @@ -1974,20 +2062,20 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1974 | mgp->link_state = ~0U; | 2062 | mgp->link_state = ~0U; |
| 1975 | mgp->rdma_tags_available = 15; | 2063 | mgp->rdma_tags_available = 15; |
| 1976 | 2064 | ||
| 1977 | lro_mgr = &mgp->rx_done.lro_mgr; | 2065 | lro_mgr = &mgp->ss.rx_done.lro_mgr; |
| 1978 | lro_mgr->dev = dev; | 2066 | lro_mgr->dev = dev; |
| 1979 | lro_mgr->features = LRO_F_NAPI; | 2067 | lro_mgr->features = LRO_F_NAPI; |
| 1980 | lro_mgr->ip_summed = CHECKSUM_COMPLETE; | 2068 | lro_mgr->ip_summed = CHECKSUM_COMPLETE; |
| 1981 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | 2069 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; |
| 1982 | lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; | 2070 | lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; |
| 1983 | lro_mgr->lro_arr = mgp->rx_done.lro_desc; | 2071 | lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; |
| 1984 | lro_mgr->get_frag_header = myri10ge_get_frag_header; | 2072 | lro_mgr->get_frag_header = myri10ge_get_frag_header; |
| 1985 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; | 2073 | lro_mgr->max_aggr = myri10ge_lro_max_pkts; |
| 1986 | lro_mgr->frag_align_pad = 2; | 2074 | lro_mgr->frag_align_pad = 2; |
| 1987 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) | 2075 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) |
| 1988 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | 2076 | lro_mgr->max_aggr = MAX_SKB_FRAGS; |
| 1989 | 2077 | ||
| 1990 | napi_enable(&mgp->napi); /* must happen prior to any irq */ | 2078 | napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ |
| 1991 | 2079 | ||
| 1992 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); | 2080 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); |
| 1993 | if (status) { | 2081 | if (status) { |
| @@ -1996,8 +2084,8 @@ static int myri10ge_open(struct net_device *dev) | |||
| 1996 | goto abort_with_rings; | 2084 | goto abort_with_rings; |
| 1997 | } | 2085 | } |
| 1998 | 2086 | ||
| 1999 | mgp->wake_queue = 0; | 2087 | mgp->ss.tx.wake_queue = 0; |
| 2000 | mgp->stop_queue = 0; | 2088 | mgp->ss.tx.stop_queue = 0; |
| 2001 | mgp->running = MYRI10GE_ETH_RUNNING; | 2089 | mgp->running = MYRI10GE_ETH_RUNNING; |
| 2002 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; | 2090 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; |
| 2003 | add_timer(&mgp->watchdog_timer); | 2091 | add_timer(&mgp->watchdog_timer); |
| @@ -2005,7 +2093,7 @@ static int myri10ge_open(struct net_device *dev) | |||
| 2005 | return 0; | 2093 | return 0; |
| 2006 | 2094 | ||
| 2007 | abort_with_rings: | 2095 | abort_with_rings: |
| 2008 | myri10ge_free_rings(dev); | 2096 | myri10ge_free_rings(&mgp->ss); |
| 2009 | 2097 | ||
| 2010 | abort_with_irq: | 2098 | abort_with_irq: |
| 2011 | myri10ge_free_irq(mgp); | 2099 | myri10ge_free_irq(mgp); |
| @@ -2017,21 +2105,19 @@ abort_with_nothing: | |||
| 2017 | 2105 | ||
| 2018 | static int myri10ge_close(struct net_device *dev) | 2106 | static int myri10ge_close(struct net_device *dev) |
| 2019 | { | 2107 | { |
| 2020 | struct myri10ge_priv *mgp; | 2108 | struct myri10ge_priv *mgp = netdev_priv(dev); |
| 2021 | struct myri10ge_cmd cmd; | 2109 | struct myri10ge_cmd cmd; |
| 2022 | int status, old_down_cnt; | 2110 | int status, old_down_cnt; |
| 2023 | 2111 | ||
| 2024 | mgp = netdev_priv(dev); | ||
| 2025 | |||
| 2026 | if (mgp->running != MYRI10GE_ETH_RUNNING) | 2112 | if (mgp->running != MYRI10GE_ETH_RUNNING) |
| 2027 | return 0; | 2113 | return 0; |
| 2028 | 2114 | ||
| 2029 | if (mgp->tx.req_bytes == NULL) | 2115 | if (mgp->ss.tx.req_bytes == NULL) |
| 2030 | return 0; | 2116 | return 0; |
| 2031 | 2117 | ||
| 2032 | del_timer_sync(&mgp->watchdog_timer); | 2118 | del_timer_sync(&mgp->watchdog_timer); |
| 2033 | mgp->running = MYRI10GE_ETH_STOPPING; | 2119 | mgp->running = MYRI10GE_ETH_STOPPING; |
| 2034 | napi_disable(&mgp->napi); | 2120 | napi_disable(&mgp->ss.napi); |
| 2035 | netif_carrier_off(dev); | 2121 | netif_carrier_off(dev); |
| 2036 | netif_stop_queue(dev); | 2122 | netif_stop_queue(dev); |
| 2037 | old_down_cnt = mgp->down_cnt; | 2123 | old_down_cnt = mgp->down_cnt; |
| @@ -2047,7 +2133,7 @@ static int myri10ge_close(struct net_device *dev) | |||
| 2047 | 2133 | ||
| 2048 | netif_tx_disable(dev); | 2134 | netif_tx_disable(dev); |
| 2049 | myri10ge_free_irq(mgp); | 2135 | myri10ge_free_irq(mgp); |
| 2050 | myri10ge_free_rings(dev); | 2136 | myri10ge_free_rings(&mgp->ss); |
| 2051 | 2137 | ||
| 2052 | mgp->running = MYRI10GE_ETH_STOPPED; | 2138 | mgp->running = MYRI10GE_ETH_STOPPED; |
| 2053 | return 0; | 2139 | return 0; |
| @@ -2143,7 +2229,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | |||
| 2143 | 2229 | ||
| 2144 | /* | 2230 | /* |
| 2145 | * Transmit a packet. We need to split the packet so that a single | 2231 | * Transmit a packet. We need to split the packet so that a single |
| 2146 | * segment does not cross myri10ge->tx.boundary, so this makes segment | 2232 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
| 2147 | * counting tricky. So rather than try to count segments up front, we | 2233 | * counting tricky. So rather than try to count segments up front, we |
| 2148 | * just give up if there are too few segments to hold a reasonably | 2234 | * just give up if there are too few segments to hold a reasonably |
| 2149 | * fragmented packet currently available. If we run | 2235 | * fragmented packet currently available. If we run |
| @@ -2154,8 +2240,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | |||
| 2154 | static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) | 2240 | static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) |
| 2155 | { | 2241 | { |
| 2156 | struct myri10ge_priv *mgp = netdev_priv(dev); | 2242 | struct myri10ge_priv *mgp = netdev_priv(dev); |
| 2243 | struct myri10ge_slice_state *ss; | ||
| 2157 | struct mcp_kreq_ether_send *req; | 2244 | struct mcp_kreq_ether_send *req; |
| 2158 | struct myri10ge_tx_buf *tx = &mgp->tx; | 2245 | struct myri10ge_tx_buf *tx; |
| 2159 | struct skb_frag_struct *frag; | 2246 | struct skb_frag_struct *frag; |
| 2160 | dma_addr_t bus; | 2247 | dma_addr_t bus; |
| 2161 | u32 low; | 2248 | u32 low; |
| @@ -2166,6 +2253,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2166 | int cum_len, seglen, boundary, rdma_count; | 2253 | int cum_len, seglen, boundary, rdma_count; |
| 2167 | u8 flags, odd_flag; | 2254 | u8 flags, odd_flag; |
| 2168 | 2255 | ||
| 2256 | /* always transmit through slot 0 */ | ||
| 2257 | ss = &mgp->ss; | ||
| 2258 | tx = &ss->tx; | ||
| 2169 | again: | 2259 | again: |
| 2170 | req = tx->req_list; | 2260 | req = tx->req_list; |
| 2171 | avail = tx->mask - 1 - (tx->req - tx->done); | 2261 | avail = tx->mask - 1 - (tx->req - tx->done); |
| @@ -2180,7 +2270,7 @@ again: | |||
| 2180 | 2270 | ||
| 2181 | if ((unlikely(avail < max_segments))) { | 2271 | if ((unlikely(avail < max_segments))) { |
| 2182 | /* we are out of transmit resources */ | 2272 | /* we are out of transmit resources */ |
| 2183 | mgp->stop_queue++; | 2273 | tx->stop_queue++; |
| 2184 | netif_stop_queue(dev); | 2274 | netif_stop_queue(dev); |
| 2185 | return 1; | 2275 | return 1; |
| 2186 | } | 2276 | } |
| @@ -2242,7 +2332,7 @@ again: | |||
| 2242 | if (skb_padto(skb, ETH_ZLEN)) { | 2332 | if (skb_padto(skb, ETH_ZLEN)) { |
| 2243 | /* The packet is gone, so we must | 2333 | /* The packet is gone, so we must |
| 2244 | * return 0 */ | 2334 | * return 0 */ |
| 2245 | mgp->stats.tx_dropped += 1; | 2335 | ss->stats.tx_dropped += 1; |
| 2246 | return 0; | 2336 | return 0; |
| 2247 | } | 2337 | } |
| 2248 | /* adjust the len to account for the zero pad | 2338 | /* adjust the len to account for the zero pad |
| @@ -2284,7 +2374,7 @@ again: | |||
| 2284 | 2374 | ||
| 2285 | while (1) { | 2375 | while (1) { |
| 2286 | /* Break the SKB or Fragment up into pieces which | 2376 | /* Break the SKB or Fragment up into pieces which |
| 2287 | * do not cross mgp->tx.boundary */ | 2377 | * do not cross mgp->tx_boundary */ |
| 2288 | low = MYRI10GE_LOWPART_TO_U32(bus); | 2378 | low = MYRI10GE_LOWPART_TO_U32(bus); |
| 2289 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); | 2379 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); |
| 2290 | while (len) { | 2380 | while (len) { |
| @@ -2294,7 +2384,8 @@ again: | |||
| 2294 | if (unlikely(count == max_segments)) | 2384 | if (unlikely(count == max_segments)) |
| 2295 | goto abort_linearize; | 2385 | goto abort_linearize; |
| 2296 | 2386 | ||
| 2297 | boundary = (low + tx->boundary) & ~(tx->boundary - 1); | 2387 | boundary = |
| 2388 | (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); | ||
| 2298 | seglen = boundary - low; | 2389 | seglen = boundary - low; |
| 2299 | if (seglen > len) | 2390 | if (seglen > len) |
| 2300 | seglen = len; | 2391 | seglen = len; |
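The rewritten boundary computation above keeps any single DMA segment from crossing a `tx_boundary`-aligned address, now read from the device-wide `mgp->tx_boundary` instead of the per-ring field. A small self-contained sketch of the same arithmetic, assuming a power-of-two boundary as the driver does:

```c
#include <stdint.h>
#include <stdio.h>

/* Clamp a segment so it never crosses a (power-of-two) tx boundary. */
static uint32_t seg_len(uint32_t low, uint32_t len, uint32_t tx_boundary)
{
	/* next boundary-aligned address strictly above low */
	uint32_t boundary = (low + tx_boundary) & ~(tx_boundary - 1);
	uint32_t seglen = boundary - low;

	return seglen > len ? len : seglen;
}

int main(void)
{
	uint32_t low = 0x1f00, len = 0x600;

	while (len) {
		uint32_t s = seg_len(low, len, 2048);

		printf("segment at 0x%x, len 0x%x\n", low, s);
		low += s;
		len -= s;
	}
	return 0;	/* prints len 0x100 at 0x1f00, then len 0x500 at 0x2000 */
}
```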
| @@ -2378,7 +2469,7 @@ again: | |||
| 2378 | myri10ge_submit_req_wc(tx, tx->req_list, count); | 2469 | myri10ge_submit_req_wc(tx, tx->req_list, count); |
| 2379 | tx->pkt_start++; | 2470 | tx->pkt_start++; |
| 2380 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { | 2471 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { |
| 2381 | mgp->stop_queue++; | 2472 | tx->stop_queue++; |
| 2382 | netif_stop_queue(dev); | 2473 | netif_stop_queue(dev); |
| 2383 | } | 2474 | } |
| 2384 | dev->trans_start = jiffies; | 2475 | dev->trans_start = jiffies; |
| @@ -2420,12 +2511,12 @@ abort_linearize: | |||
| 2420 | if (skb_linearize(skb)) | 2511 | if (skb_linearize(skb)) |
| 2421 | goto drop; | 2512 | goto drop; |
| 2422 | 2513 | ||
| 2423 | mgp->tx_linearized++; | 2514 | tx->linearized++; |
| 2424 | goto again; | 2515 | goto again; |
| 2425 | 2516 | ||
| 2426 | drop: | 2517 | drop: |
| 2427 | dev_kfree_skb_any(skb); | 2518 | dev_kfree_skb_any(skb); |
| 2428 | mgp->stats.tx_dropped += 1; | 2519 | ss->stats.tx_dropped += 1; |
| 2429 | return 0; | 2520 | return 0; |
| 2430 | 2521 | ||
| 2431 | } | 2522 | } |
| @@ -2433,7 +2524,7 @@ drop: | |||
| 2433 | static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) | 2524 | static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) |
| 2434 | { | 2525 | { |
| 2435 | struct sk_buff *segs, *curr; | 2526 | struct sk_buff *segs, *curr; |
| 2436 | struct myri10ge_priv *mgp = dev->priv; | 2527 | struct myri10ge_priv *mgp = netdev_priv(dev); |
| 2437 | int status; | 2528 | int status; |
| 2438 | 2529 | ||
| 2439 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); | 2530 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); |
| @@ -2473,14 +2564,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) | |||
| 2473 | 2564 | ||
| 2474 | static void myri10ge_set_multicast_list(struct net_device *dev) | 2565 | static void myri10ge_set_multicast_list(struct net_device *dev) |
| 2475 | { | 2566 | { |
| 2567 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
| 2476 | struct myri10ge_cmd cmd; | 2568 | struct myri10ge_cmd cmd; |
| 2477 | struct myri10ge_priv *mgp; | ||
| 2478 | struct dev_mc_list *mc_list; | 2569 | struct dev_mc_list *mc_list; |
| 2479 | __be32 data[2] = { 0, 0 }; | 2570 | __be32 data[2] = { 0, 0 }; |
| 2480 | int err; | 2571 | int err; |
| 2481 | DECLARE_MAC_BUF(mac); | 2572 | DECLARE_MAC_BUF(mac); |
| 2482 | 2573 | ||
| 2483 | mgp = netdev_priv(dev); | ||
| 2484 | /* can be called from atomic contexts, | 2574 | /* can be called from atomic contexts, |
| 2485 | * pass 1 to force atomicity in myri10ge_send_cmd() */ | 2575 | * pass 1 to force atomicity in myri10ge_send_cmd() */ |
| 2486 | myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); | 2576 | myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); |
| @@ -2616,13 +2706,14 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) | |||
| 2616 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; | 2706 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; |
| 2617 | if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { | 2707 | if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { |
| 2618 | if (myri10ge_ecrc_enable > 1) { | 2708 | if (myri10ge_ecrc_enable > 1) { |
| 2619 | struct pci_dev *old_bridge = bridge; | 2709 | struct pci_dev *prev_bridge, *old_bridge = bridge; |
| 2620 | 2710 | ||
| 2621 | /* Walk the hierarchy up to the root port | 2711 | /* Walk the hierarchy up to the root port |
| 2622 | * where ECRC has to be enabled */ | 2712 | * where ECRC has to be enabled */ |
| 2623 | do { | 2713 | do { |
| 2714 | prev_bridge = bridge; | ||
| 2624 | bridge = bridge->bus->self; | 2715 | bridge = bridge->bus->self; |
| 2625 | if (!bridge) { | 2716 | if (!bridge || prev_bridge == bridge) { |
| 2626 | dev_err(dev, | 2717 | dev_err(dev, |
| 2627 | "Failed to find root port" | 2718 | "Failed to find root port" |
| 2628 | " to force ECRC\n"); | 2719 | " to force ECRC\n"); |
| @@ -2681,9 +2772,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) | |||
| 2681 | * already been enabled, then it must use a firmware image which works | 2772 | * already been enabled, then it must use a firmware image which works |
| 2682 | * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it | 2773 | * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it |
| 2683 | * should also ensure that it never gives the device a Read-DMA which is | 2774 | * should also ensure that it never gives the device a Read-DMA which is |
| 2684 | * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is | 2775 | * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is |
| 2685 | * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) | 2776 | * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) |
| 2686 | * firmware image, and set tx.boundary to 4KB. | 2777 | * firmware image, and set tx_boundary to 4KB. |
| 2687 | */ | 2778 | */ |
| 2688 | 2779 | ||
| 2689 | static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | 2780 | static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) |
| @@ -2692,7 +2783,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
| 2692 | struct device *dev = &pdev->dev; | 2783 | struct device *dev = &pdev->dev; |
| 2693 | int status; | 2784 | int status; |
| 2694 | 2785 | ||
| 2695 | mgp->tx.boundary = 4096; | 2786 | mgp->tx_boundary = 4096; |
| 2696 | /* | 2787 | /* |
| 2697 | * Verify the max read request size was set to 4KB | 2788 | * Verify the max read request size was set to 4KB |
| 2698 | * before trying the test with 4KB. | 2789 | * before trying the test with 4KB. |
| @@ -2704,7 +2795,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
| 2704 | } | 2795 | } |
| 2705 | if (status != 4096) { | 2796 | if (status != 4096) { |
| 2706 | dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); | 2797 | dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); |
| 2707 | mgp->tx.boundary = 2048; | 2798 | mgp->tx_boundary = 2048; |
| 2708 | } | 2799 | } |
| 2709 | /* | 2800 | /* |
| 2710 | * load the optimized firmware (which assumes aligned PCIe | 2801 | * load the optimized firmware (which assumes aligned PCIe |
| @@ -2737,7 +2828,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) | |||
| 2737 | "Please install up to date fw\n"); | 2828 | "Please install up to date fw\n"); |
| 2738 | abort: | 2829 | abort: |
| 2739 | /* fall back to using the unaligned firmware */ | 2830 | /* fall back to using the unaligned firmware */ |
| 2740 | mgp->tx.boundary = 2048; | 2831 | mgp->tx_boundary = 2048; |
| 2741 | mgp->fw_name = myri10ge_fw_unaligned; | 2832 | mgp->fw_name = myri10ge_fw_unaligned; |
| 2742 | 2833 | ||
| 2743 | } | 2834 | } |
| @@ -2758,7 +2849,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |||
| 2758 | if (link_width < 8) { | 2849 | if (link_width < 8) { |
| 2759 | dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", | 2850 | dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", |
| 2760 | link_width); | 2851 | link_width); |
| 2761 | mgp->tx.boundary = 4096; | 2852 | mgp->tx_boundary = 4096; |
| 2762 | mgp->fw_name = myri10ge_fw_aligned; | 2853 | mgp->fw_name = myri10ge_fw_aligned; |
| 2763 | } else { | 2854 | } else { |
| 2764 | myri10ge_firmware_probe(mgp); | 2855 | myri10ge_firmware_probe(mgp); |
| @@ -2767,12 +2858,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |||
| 2767 | if (myri10ge_force_firmware == 1) { | 2858 | if (myri10ge_force_firmware == 1) { |
| 2768 | dev_info(&mgp->pdev->dev, | 2859 | dev_info(&mgp->pdev->dev, |
| 2769 | "Assuming aligned completions (forced)\n"); | 2860 | "Assuming aligned completions (forced)\n"); |
| 2770 | mgp->tx.boundary = 4096; | 2861 | mgp->tx_boundary = 4096; |
| 2771 | mgp->fw_name = myri10ge_fw_aligned; | 2862 | mgp->fw_name = myri10ge_fw_aligned; |
| 2772 | } else { | 2863 | } else { |
| 2773 | dev_info(&mgp->pdev->dev, | 2864 | dev_info(&mgp->pdev->dev, |
| 2774 | "Assuming unaligned completions (forced)\n"); | 2865 | "Assuming unaligned completions (forced)\n"); |
| 2775 | mgp->tx.boundary = 2048; | 2866 | mgp->tx_boundary = 2048; |
| 2776 | mgp->fw_name = myri10ge_fw_unaligned; | 2867 | mgp->fw_name = myri10ge_fw_unaligned; |
| 2777 | } | 2868 | } |
| 2778 | } | 2869 | } |
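Taken together, these hunks make `tx_boundary` a device-wide field whose value tracks the firmware choice. A rough summary of the resulting selection logic, as a sketch of the control flow rather than the driver's exact code:

```c
/* Sketch: how tx_boundary and the firmware image are chosen. */
if (myri10ge_force_firmware == 0) {
	if (link_width < 8) {
		/* narrow PCIe links always yield aligned completions */
		mgp->tx_boundary = 4096;
		mgp->fw_name = myri10ge_fw_aligned;
	} else {
		/* probe with 4KB reads; may fall back to 2048/unaligned */
		myri10ge_firmware_probe(mgp);
	}
} else if (myri10ge_force_firmware == 1) {
	mgp->tx_boundary = 4096;
	mgp->fw_name = myri10ge_fw_aligned;
} else {
	mgp->tx_boundary = 2048;
	mgp->fw_name = myri10ge_fw_unaligned;
}
```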
| @@ -2889,6 +2980,7 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
| 2889 | { | 2980 | { |
| 2890 | struct myri10ge_priv *mgp = | 2981 | struct myri10ge_priv *mgp = |
| 2891 | container_of(work, struct myri10ge_priv, watchdog_work); | 2982 | container_of(work, struct myri10ge_priv, watchdog_work); |
| 2983 | struct myri10ge_tx_buf *tx; | ||
| 2892 | u32 reboot; | 2984 | u32 reboot; |
| 2893 | int status; | 2985 | int status; |
| 2894 | u16 cmd, vendor; | 2986 | u16 cmd, vendor; |
| @@ -2938,15 +3030,16 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
| 2938 | 3030 | ||
| 2939 | printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", | 3031 | printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", |
| 2940 | mgp->dev->name); | 3032 | mgp->dev->name); |
| 3033 | tx = &mgp->ss.tx; | ||
| 2941 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | 3034 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", |
| 2942 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | 3035 | mgp->dev->name, tx->req, tx->done, |
| 2943 | mgp->tx.pkt_start, mgp->tx.pkt_done, | 3036 | tx->pkt_start, tx->pkt_done, |
| 2944 | (int)ntohl(mgp->fw_stats->send_done_count)); | 3037 | (int)ntohl(mgp->ss.fw_stats->send_done_count)); |
| 2945 | msleep(2000); | 3038 | msleep(2000); |
| 2946 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | 3039 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", |
| 2947 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | 3040 | mgp->dev->name, tx->req, tx->done, |
| 2948 | mgp->tx.pkt_start, mgp->tx.pkt_done, | 3041 | tx->pkt_start, tx->pkt_done, |
| 2949 | (int)ntohl(mgp->fw_stats->send_done_count)); | 3042 | (int)ntohl(mgp->ss.fw_stats->send_done_count)); |
| 2950 | } | 3043 | } |
| 2951 | rtnl_lock(); | 3044 | rtnl_lock(); |
| 2952 | myri10ge_close(mgp->dev); | 3045 | myri10ge_close(mgp->dev); |
| @@ -2969,28 +3062,31 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
| 2969 | static void myri10ge_watchdog_timer(unsigned long arg) | 3062 | static void myri10ge_watchdog_timer(unsigned long arg) |
| 2970 | { | 3063 | { |
| 2971 | struct myri10ge_priv *mgp; | 3064 | struct myri10ge_priv *mgp; |
| 3065 | struct myri10ge_slice_state *ss; | ||
| 2972 | u32 rx_pause_cnt; | 3066 | u32 rx_pause_cnt; |
| 2973 | 3067 | ||
| 2974 | mgp = (struct myri10ge_priv *)arg; | 3068 | mgp = (struct myri10ge_priv *)arg; |
| 2975 | 3069 | ||
| 2976 | if (mgp->rx_small.watchdog_needed) { | 3070 | rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); |
| 2977 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, | 3071 | |
| 3072 | ss = &mgp->ss; | ||
| 3073 | if (ss->rx_small.watchdog_needed) { | ||
| 3074 | myri10ge_alloc_rx_pages(mgp, &ss->rx_small, | ||
| 2978 | mgp->small_bytes + MXGEFW_PAD, 1); | 3075 | mgp->small_bytes + MXGEFW_PAD, 1); |
| 2979 | if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= | 3076 | if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= |
| 2980 | myri10ge_fill_thresh) | 3077 | myri10ge_fill_thresh) |
| 2981 | mgp->rx_small.watchdog_needed = 0; | 3078 | ss->rx_small.watchdog_needed = 0; |
| 2982 | } | 3079 | } |
| 2983 | if (mgp->rx_big.watchdog_needed) { | 3080 | if (ss->rx_big.watchdog_needed) { |
| 2984 | myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); | 3081 | myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); |
| 2985 | if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= | 3082 | if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= |
| 2986 | myri10ge_fill_thresh) | 3083 | myri10ge_fill_thresh) |
| 2987 | mgp->rx_big.watchdog_needed = 0; | 3084 | ss->rx_big.watchdog_needed = 0; |
| 2988 | } | 3085 | } |
| 2989 | rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); | ||
| 2990 | 3086 | ||
| 2991 | if (mgp->tx.req != mgp->tx.done && | 3087 | if (ss->tx.req != ss->tx.done && |
| 2992 | mgp->tx.done == mgp->watchdog_tx_done && | 3088 | ss->tx.done == ss->watchdog_tx_done && |
| 2993 | mgp->watchdog_tx_req != mgp->watchdog_tx_done) { | 3089 | ss->watchdog_tx_req != ss->watchdog_tx_done) { |
| 2994 | /* nic seems like it might be stuck.. */ | 3090 | /* nic seems like it might be stuck.. */ |
| 2995 | if (rx_pause_cnt != mgp->watchdog_pause) { | 3091 | if (rx_pause_cnt != mgp->watchdog_pause) { |
| 2996 | if (net_ratelimit()) | 3092 | if (net_ratelimit()) |
| @@ -3005,8 +3101,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
| 3005 | /* rearm timer */ | 3101 | /* rearm timer */ |
| 3006 | mod_timer(&mgp->watchdog_timer, | 3102 | mod_timer(&mgp->watchdog_timer, |
| 3007 | jiffies + myri10ge_watchdog_timeout * HZ); | 3103 | jiffies + myri10ge_watchdog_timeout * HZ); |
| 3008 | mgp->watchdog_tx_done = mgp->tx.done; | 3104 | ss->watchdog_tx_done = ss->tx.done; |
| 3009 | mgp->watchdog_tx_req = mgp->tx.req; | 3105 | ss->watchdog_tx_req = ss->tx.req; |
| 3010 | mgp->watchdog_pause = rx_pause_cnt; | 3106 | mgp->watchdog_pause = rx_pause_cnt; |
| 3011 | } | 3107 | } |
| 3012 | 3108 | ||
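With the stall counters moved into the slice, the watchdog now compares per-slice transmit progress against the snapshot taken on the previous tick. A condensed sketch of that heuristic:

```c
/* Sketch of the stall test above: requests are outstanding, nothing
 * completed since the last tick, and the previous tick already saw the
 * same done count.  A changing dropped_pause counter means the link
 * partner is pausing us (reported, but not treated as a hung NIC);
 * otherwise the reset worker is scheduled. */
bool maybe_stuck = ss->tx.req != ss->tx.done &&
		   ss->tx.done == ss->watchdog_tx_done &&
		   ss->watchdog_tx_req != ss->watchdog_tx_done;
```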
| @@ -3030,7 +3126,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3030 | 3126 | ||
| 3031 | mgp = netdev_priv(netdev); | 3127 | mgp = netdev_priv(netdev); |
| 3032 | mgp->dev = netdev; | 3128 | mgp->dev = netdev; |
| 3033 | netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); | 3129 | netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); |
| 3034 | mgp->pdev = pdev; | 3130 | mgp->pdev = pdev; |
| 3035 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | 3131 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; |
| 3036 | mgp->pause = myri10ge_flow_control; | 3132 | mgp->pause = myri10ge_flow_control; |
| @@ -3076,9 +3172,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3076 | if (mgp->cmd == NULL) | 3172 | if (mgp->cmd == NULL) |
| 3077 | goto abort_with_netdev; | 3173 | goto abort_with_netdev; |
| 3078 | 3174 | ||
| 3079 | mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3175 | mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
| 3080 | &mgp->fw_stats_bus, GFP_KERNEL); | 3176 | &mgp->ss.fw_stats_bus, GFP_KERNEL); |
| 3081 | if (mgp->fw_stats == NULL) | 3177 | if (mgp->ss.fw_stats == NULL) |
| 3082 | goto abort_with_cmd; | 3178 | goto abort_with_cmd; |
| 3083 | 3179 | ||
| 3084 | mgp->board_span = pci_resource_len(pdev, 0); | 3180 | mgp->board_span = pci_resource_len(pdev, 0); |
| @@ -3118,12 +3214,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3118 | netdev->dev_addr[i] = mgp->mac_addr[i]; | 3214 | netdev->dev_addr[i] = mgp->mac_addr[i]; |
| 3119 | 3215 | ||
| 3120 | /* allocate rx done ring */ | 3216 | /* allocate rx done ring */ |
| 3121 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3217 | bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
| 3122 | mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, | 3218 | mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, |
| 3123 | &mgp->rx_done.bus, GFP_KERNEL); | 3219 | &mgp->ss.rx_done.bus, GFP_KERNEL); |
| 3124 | if (mgp->rx_done.entry == NULL) | 3220 | if (mgp->ss.rx_done.entry == NULL) |
| 3125 | goto abort_with_ioremap; | 3221 | goto abort_with_ioremap; |
| 3126 | memset(mgp->rx_done.entry, 0, bytes); | 3222 | memset(mgp->ss.rx_done.entry, 0, bytes); |
| 3127 | 3223 | ||
| 3128 | myri10ge_select_firmware(mgp); | 3224 | myri10ge_select_firmware(mgp); |
| 3129 | 3225 | ||
| @@ -3183,7 +3279,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3183 | } | 3279 | } |
| 3184 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 3280 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", |
| 3185 | (mgp->msi_enabled ? "MSI" : "xPIC"), | 3281 | (mgp->msi_enabled ? "MSI" : "xPIC"), |
| 3186 | netdev->irq, mgp->tx.boundary, mgp->fw_name, | 3282 | netdev->irq, mgp->tx_boundary, mgp->fw_name, |
| 3187 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 3283 | (mgp->wc_enabled ? "Enabled" : "Disabled")); |
| 3188 | 3284 | ||
| 3189 | return 0; | 3285 | return 0; |
| @@ -3195,9 +3291,9 @@ abort_with_firmware: | |||
| 3195 | myri10ge_dummy_rdma(mgp, 0); | 3291 | myri10ge_dummy_rdma(mgp, 0); |
| 3196 | 3292 | ||
| 3197 | abort_with_rx_done: | 3293 | abort_with_rx_done: |
| 3198 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3294 | bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
| 3199 | dma_free_coherent(&pdev->dev, bytes, | 3295 | dma_free_coherent(&pdev->dev, bytes, |
| 3200 | mgp->rx_done.entry, mgp->rx_done.bus); | 3296 | mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); |
| 3201 | 3297 | ||
| 3202 | abort_with_ioremap: | 3298 | abort_with_ioremap: |
| 3203 | iounmap(mgp->sram); | 3299 | iounmap(mgp->sram); |
| @@ -3207,8 +3303,8 @@ abort_with_wc: | |||
| 3207 | if (mgp->mtrr >= 0) | 3303 | if (mgp->mtrr >= 0) |
| 3208 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | 3304 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); |
| 3209 | #endif | 3305 | #endif |
| 3210 | dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3306 | dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
| 3211 | mgp->fw_stats, mgp->fw_stats_bus); | 3307 | mgp->ss.fw_stats, mgp->ss.fw_stats_bus); |
| 3212 | 3308 | ||
| 3213 | abort_with_cmd: | 3309 | abort_with_cmd: |
| 3214 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 3310 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
| @@ -3246,9 +3342,9 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
| 3246 | /* avoid a memory leak */ | 3342 | /* avoid a memory leak */ |
| 3247 | pci_restore_state(pdev); | 3343 | pci_restore_state(pdev); |
| 3248 | 3344 | ||
| 3249 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 3345 | bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); |
| 3250 | dma_free_coherent(&pdev->dev, bytes, | 3346 | dma_free_coherent(&pdev->dev, bytes, |
| 3251 | mgp->rx_done.entry, mgp->rx_done.bus); | 3347 | mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); |
| 3252 | 3348 | ||
| 3253 | iounmap(mgp->sram); | 3349 | iounmap(mgp->sram); |
| 3254 | 3350 | ||
| @@ -3256,8 +3352,8 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
| 3256 | if (mgp->mtrr >= 0) | 3352 | if (mgp->mtrr >= 0) |
| 3257 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | 3353 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); |
| 3258 | #endif | 3354 | #endif |
| 3259 | dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), | 3355 | dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), |
| 3260 | mgp->fw_stats, mgp->fw_stats_bus); | 3356 | mgp->ss.fw_stats, mgp->ss.fw_stats_bus); |
| 3261 | 3357 | ||
| 3262 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 3358 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
| 3263 | mgp->cmd, mgp->cmd_bus); | 3359 | mgp->cmd, mgp->cmd_bus); |
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index 58e57178c563..fdbeeee07372 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h | |||
| @@ -10,7 +10,7 @@ struct mcp_dma_addr { | |||
| 10 | __be32 low; | 10 | __be32 low; |
| 11 | }; | 11 | }; |
| 12 | 12 | ||
| 13 | /* 4 Bytes. 8 Bytes for NDIS drivers. */ | 13 | /* 4 Bytes */ |
| 14 | struct mcp_slot { | 14 | struct mcp_slot { |
| 15 | __sum16 checksum; | 15 | __sum16 checksum; |
| 16 | __be16 length; | 16 | __be16 length; |
| @@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type { | |||
| 144 | * a power of 2 number of entries. */ | 144 | * a power of 2 number of entries. */ |
| 145 | 145 | ||
| 146 | MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ | 146 | MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ |
| 147 | #define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) | ||
| 147 | 148 | ||
| 148 | /* command to bring ethernet interface up. Above parameters | 149 | /* command to bring ethernet interface up. Above parameters |
| 149 | * (plus mtu & mac address) must have been exchanged prior | 150 | * (plus mtu & mac address) must have been exchanged prior |
| @@ -221,10 +222,14 @@ enum myri10ge_mcp_cmd_type { | |||
| 221 | MXGEFW_CMD_GET_MAX_RSS_QUEUES, | 222 | MXGEFW_CMD_GET_MAX_RSS_QUEUES, |
| 222 | MXGEFW_CMD_ENABLE_RSS_QUEUES, | 223 | MXGEFW_CMD_ENABLE_RSS_QUEUES, |
| 223 | /* data0 = number of slices n (0, 1, ..., n-1) to enable | 224 | /* data0 = number of slices n (0, 1, ..., n-1) to enable |
| 224 | * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. | 225 | * data1 = interrupt mode. |
| 226 | * 0=share one INTx/MSI, 1=use one MSI-X per queue. | ||
| 225 | * If all queues share one interrupt, the driver must have set | 227 | * If all queues share one interrupt, the driver must have set |
| 226 | * RSS_SHARED_INTERRUPT_DMA before enabling queues. | 228 | * RSS_SHARED_INTERRUPT_DMA before enabling queues. |
| 227 | */ | 229 | */ |
| 230 | #define MXGEFW_SLICE_INTR_MODE_SHARED 0 | ||
| 231 | #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 | ||
| 232 | |||
| 228 | MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, | 233 | MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, |
| 229 | MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, | 234 | MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, |
| 230 | /* data0, data1 = bus address lsw, msw */ | 235 | /* data0, data1 = bus address lsw, msw */ |
| @@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type { | |||
| 241 | * 0: disable rss. nic does not distribute receive packets. | 246 | * 0: disable rss. nic does not distribute receive packets. |
| 242 | * 1: enable rss. nic distributes receive packets among queues. | 247 | * 1: enable rss. nic distributes receive packets among queues. |
| 243 | * data1 = hash type | 248 | * data1 = hash type |
| 244 | * 1: IPV4 | 249 | * 1: IPV4 (required by RSS) |
| 245 | * 2: TCP_IPV4 | 250 | * 2: TCP_IPV4 (required by RSS) |
| 246 | * 3: IPV4 | TCP_IPV4 | 251 | * 3: IPV4 | TCP_IPV4 (required by RSS) |
| 252 | * 4: source port | ||
| 247 | */ | 253 | */ |
| 254 | #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 | ||
| 255 | #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 | ||
| 256 | #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 | ||
| 248 | 257 | ||
| 249 | MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, | 258 | MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, |
| 250 | /* Return data = the max. size of the entire headers of an IPv6 TSO packet. | 259 | /* Return data = the max. size of the entire headers of an IPv6 TSO packet. |
| @@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type { | |||
| 260 | * 0: Linux/FreeBSD style (NIC default) | 269 | * 0: Linux/FreeBSD style (NIC default) |
| 261 | * 1: NDIS/NetBSD style | 270 | * 1: NDIS/NetBSD style |
| 262 | */ | 271 | */ |
| 272 | #define MXGEFW_TSO_MODE_LINUX 0 | ||
| 273 | #define MXGEFW_TSO_MODE_NDIS 1 | ||
| 263 | 274 | ||
| 264 | MXGEFW_CMD_MDIO_READ, | 275 | MXGEFW_CMD_MDIO_READ, |
| 265 | /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ | 276 | /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ |
| @@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type { | |||
| 286 | /* Return data = NIC memory offset of mcp_vpump_public_global */ | 297 | /* Return data = NIC memory offset of mcp_vpump_public_global */ |
| 287 | MXGEFW_CMD_RESET_VPUMP, | 298 | MXGEFW_CMD_RESET_VPUMP, |
| 288 | /* Resets the VPUMP state */ | 299 | /* Resets the VPUMP state */ |
| 300 | |||
| 301 | MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, | ||
| 302 | /* data0 = mcp_slot type to use. | ||
| 303 | * 0 = the default 4B mcp_slot | ||
| 304 | * 1 = 8B mcp_slot_8 | ||
| 305 | */ | ||
| 306 | #define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 | ||
| 307 | #define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 | ||
| 308 | |||
| 309 | MXGEFW_CMD_SET_THROTTLE_FACTOR, | ||
| 310 | /* set the throttle factor for ethp_z8e | ||
| 311 | * data0 = throttle_factor | ||
| 312 | * throttle_factor = 256 * pcie-raw-speed / tx_speed | ||
| 313 | * tx_speed = 256 * pcie-raw-speed / throttle_factor | ||
| 314 | * | ||
| 315 | * For PCI-E x8: pcie-raw-speed == 16Gb/s | ||
| 316 | * For PCI-E x4: pcie-raw-speed == 8Gb/s | ||
| 317 | * | ||
| 318 | * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s | ||
| 319 | * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s | ||
| 320 | * | ||
| 321 | * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s | ||
| 322 | * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s | ||
| 323 | */ | ||
| 324 | |||
| 325 | MXGEFW_CMD_VPUMP_UP, | ||
| 326 | /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ | ||
| 327 | MXGEFW_CMD_GET_VPUMP_CLK, | ||
| 328 | /* Get the lanai clock */ | ||
| 329 | |||
| 330 | MXGEFW_CMD_GET_DCA_OFFSET, | ||
| 331 | /* offset of dca control for WDMAs */ | ||
| 289 | }; | 332 | }; |
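The throttle-factor relation documented in the new `MXGEFW_CMD_SET_THROTTLE_FACTOR` comment above is easy to invert. A small sketch, using the 16 Gb/s PCI-E x8 raw rate from the comment; the helper name and the choice of Mb/s units are illustrative:

```c
#include <stdio.h>

/* throttle_factor = 256 * pcie_raw_speed / tx_speed, per the comment
 * above; Mb/s keeps the arithmetic integral. */
static unsigned throttle_factor(unsigned pcie_raw_mbps, unsigned tx_mbps)
{
	return 256u * pcie_raw_mbps / tx_mbps;
}

int main(void)
{
	printf("%u\n", throttle_factor(16000, 9846)); /* 416 == 0x1a0 (ex1) */
	printf("%u\n", throttle_factor(16000, 8000)); /* 512 == 0x200 (ex2) */
	return 0;
}
```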
| 290 | 333 | ||
| 291 | enum myri10ge_mcp_cmd_status { | 334 | enum myri10ge_mcp_cmd_status { |
| @@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status { | |||
| 302 | MXGEFW_CMD_ERROR_UNALIGNED, | 345 | MXGEFW_CMD_ERROR_UNALIGNED, |
| 303 | MXGEFW_CMD_ERROR_NO_MDIO, | 346 | MXGEFW_CMD_ERROR_NO_MDIO, |
| 304 | MXGEFW_CMD_ERROR_XFP_FAILURE, | 347 | MXGEFW_CMD_ERROR_XFP_FAILURE, |
| 305 | MXGEFW_CMD_ERROR_XFP_ABSENT | 348 | MXGEFW_CMD_ERROR_XFP_ABSENT, |
| 349 | MXGEFW_CMD_ERROR_BAD_PCIE_LINK | ||
| 306 | }; | 350 | }; |
| 307 | 351 | ||
| 308 | #define MXGEFW_OLD_IRQ_DATA_LEN 40 | 352 | #define MXGEFW_OLD_IRQ_DATA_LEN 40 |
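The new interrupt-mode and hash-type constants spell out the `data0`/`data1` encodings described in the comments above. An illustrative fragment following the `myri10ge_send_cmd()` calling pattern seen earlier in this patch; `mgp->num_slices` is a hypothetical field used only for the example:

```c
/* Illustrative only: enable n RSS slices with one MSI-X vector each,
 * using the constants introduced above.  (Shared-interrupt mode would
 * additionally require RSS_SHARED_INTERRUPT_DMA to be set first.) */
struct myri10ge_cmd cmd;
int status;

cmd.data0 = mgp->num_slices;	/* hypothetical slice count field */
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd, 0);

/* Hash on both the IPv4 header and the TCP 4-tuple: */
unsigned hash_type = MXGEFW_RSS_HASH_TYPE_IPV4 |
		     MXGEFW_RSS_HASH_TYPE_TCP_IPV4;	/* == 0x3 */
```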
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h index 16a810dd6d51..07d65c2cbb24 100644 --- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h | |||
| @@ -1,30 +1,6 @@ | |||
| 1 | #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ | 1 | #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ |
| 2 | #define __MYRI10GE_MCP_GEN_HEADER_H__ | 2 | #define __MYRI10GE_MCP_GEN_HEADER_H__ |
| 3 | 3 | ||
| 4 | /* this file defines a standard header used as a first entry point to | ||
| 5 | * exchange information between the firmware and the driver. The | ||
| 6 | * header structure can be anywhere in the mcp. It will usually be in | ||
| 7 | * the .data section, because some fields need to be initialized at | ||
| 8 | * compile time. | ||
| 9 | * The 32-bit word at offset MCP_HEADER_PTR_OFFSET in the mcp must | ||
| 10 | * contain the location of the header. | ||
| 11 | * | ||
| 12 | * Typically a MCP will start with the following: | ||
| 13 | * .text | ||
| 14 | * .space 52 ! to help catch MEMORY_INT errors | ||
| 15 | * bt start ! jump to real code | ||
| 16 | * nop | ||
| 17 | * .long _gen_mcp_header | ||
| 18 | * | ||
| 19 | * The source will have a definition like: | ||
| 20 | * | ||
| 21 | * mcp_gen_header_t gen_mcp_header = { | ||
| 22 | * .header_length = sizeof(mcp_gen_header_t), | ||
| 23 | * .mcp_type = MCP_TYPE_XXX, | ||
| 24 | * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", | ||
| 25 | * .mcp_globals = (unsigned)&Globals | ||
| 26 | * }; | ||
| 27 | */ | ||
| 28 | 4 | ||
| 29 | #define MCP_HEADER_PTR_OFFSET 0x3c | 5 | #define MCP_HEADER_PTR_OFFSET 0x3c |
| 30 | 6 | ||
| @@ -32,13 +8,14 @@ | |||
| 32 | #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ | 8 | #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ |
| 33 | #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ | 9 | #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ |
| 34 | #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ | 10 | #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ |
| 11 | #define MCP_TYPE_DFLT 0x20202020 /* " " */ | ||
| 35 | 12 | ||
| 36 | struct mcp_gen_header { | 13 | struct mcp_gen_header { |
| 37 | /* the first 4 fields are filled at compile time */ | 14 | /* the first 4 fields are filled at compile time */ |
| 38 | unsigned header_length; | 15 | unsigned header_length; |
| 39 | __be32 mcp_type; | 16 | __be32 mcp_type; |
| 40 | char version[128]; | 17 | char version[128]; |
| 41 | unsigned mcp_globals; /* pointer to mcp-type specific structure */ | 18 | unsigned mcp_private; /* pointer to mcp-type specific structure */ |
| 42 | 19 | ||
| 43 | /* filled by the MCP at run-time */ | 20 | /* filled by the MCP at run-time */ |
| 44 | unsigned sram_size; | 21 | unsigned sram_size; |
| @@ -53,6 +30,18 @@ struct mcp_gen_header { | |||
| 53 | * | 30 | * |
| 54 | * Never remove any field. Keep everything naturally aligned. | 31 | * Never remove any field. Keep everything naturally aligned. |
| 55 | */ | 32 | */ |
| 33 | |||
| 34 | /* Specifies if the running mcp is mcp0, 1, or 2. */ | ||
| 35 | unsigned char mcp_index; | ||
| 36 | unsigned char disable_rabbit; | ||
| 37 | unsigned char unaligned_tlp; | ||
| 38 | unsigned char pad1; | ||
| 39 | unsigned counters_addr; | ||
| 40 | unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ | ||
| 41 | unsigned short handoff_id_major; /* must be equal */ | ||
| 42 | unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ | ||
| 43 | unsigned msix_table_addr; /* start address of msix table in firmware */ | ||
| 44 | /* 8 */ | ||
| 56 | }; | 45 | }; |
| 57 | 46 | ||
| 58 | #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ | 47 | #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ |
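The deleted block comment explained that the 32-bit word at `MCP_HEADER_PTR_OFFSET` (0x3c) in the firmware image points at this header. A hedged sketch of locating it in a raw image, assuming the struct definition above is in scope and a little-endian host:

```c
#include <stddef.h>
#include <string.h>

/* Sketch: find the generated header inside a raw firmware image.
 * Endianness conversion and finer validation elided. */
static struct mcp_gen_header *find_mcp_header(char *image, size_t size)
{
	unsigned hdr_offset;

	memcpy(&hdr_offset, image + MCP_HEADER_PTR_OFFSET, sizeof(hdr_offset));
	if (hdr_offset + sizeof(struct mcp_gen_header) > size)
		return NULL;	/* pointer out of bounds (or older, shorter header) */
	return (struct mcp_gen_header *)(image + hdr_offset);
}
```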
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile index 0f023447eafd..1d2daeec7ac1 100644 --- a/drivers/net/sfc/Makefile +++ b/drivers/net/sfc/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ | 1 | sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ |
| 2 | i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ | 2 | i2c-direct.o selftest.o ethtool.o xfp_phy.o \ |
| 3 | tenxpress.o boards.o sfe4001.o | 3 | mdio_10g.o tenxpress.o boards.o sfe4001.o |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_SFC) += sfc.o | 5 | obj-$(CONFIG_SFC) += sfc.o |
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h index f56341d428e1..695764dc2e64 100644 --- a/drivers/net/sfc/boards.h +++ b/drivers/net/sfc/boards.h | |||
| @@ -22,5 +22,7 @@ enum efx_board_type { | |||
| 22 | extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); | 22 | extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); |
| 23 | extern int sfe4001_poweron(struct efx_nic *efx); | 23 | extern int sfe4001_poweron(struct efx_nic *efx); |
| 24 | extern void sfe4001_poweroff(struct efx_nic *efx); | 24 | extern void sfe4001_poweroff(struct efx_nic *efx); |
| 25 | /* Are we putting the PHY into flash config mode */ | ||
| 26 | extern unsigned int sfe4001_phy_flash_cfg; | ||
| 25 | 27 | ||
| 26 | #endif | 28 | #endif |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 59edcf793c19..418f2e53a95b 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
| @@ -1873,6 +1873,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
| 1873 | tx_queue->queue = i; | 1873 | tx_queue->queue = i; |
| 1874 | tx_queue->buffer = NULL; | 1874 | tx_queue->buffer = NULL; |
| 1875 | tx_queue->channel = &efx->channel[0]; /* for safety */ | 1875 | tx_queue->channel = &efx->channel[0]; /* for safety */ |
| 1876 | tx_queue->tso_headers_free = NULL; | ||
| 1876 | } | 1877 | } |
| 1877 | for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { | 1878 | for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { |
| 1878 | rx_queue = &efx->rx_queue[i]; | 1879 | rx_queue = &efx->rx_queue[i]; |
| @@ -2071,7 +2072,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
| 2071 | net_dev = alloc_etherdev(sizeof(*efx)); | 2072 | net_dev = alloc_etherdev(sizeof(*efx)); |
| 2072 | if (!net_dev) | 2073 | if (!net_dev) |
| 2073 | return -ENOMEM; | 2074 | return -ENOMEM; |
| 2074 | net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; | 2075 | net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | |
| 2076 | NETIF_F_HIGHDMA | NETIF_F_TSO); | ||
| 2075 | if (lro) | 2077 | if (lro) |
| 2076 | net_dev->features |= NETIF_F_LRO; | 2078 | net_dev->features |= NETIF_F_LRO; |
| 2077 | efx = net_dev->priv; | 2079 | efx = net_dev->priv; |
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h index 43663a4619da..c53290d08e2b 100644 --- a/drivers/net/sfc/enum.h +++ b/drivers/net/sfc/enum.h | |||
| @@ -10,6 +10,55 @@ | |||
| 10 | #ifndef EFX_ENUM_H | 10 | #ifndef EFX_ENUM_H |
| 11 | #define EFX_ENUM_H | 11 | #define EFX_ENUM_H |
| 12 | 12 | ||
| 13 | /** | ||
| 14 | * enum efx_loopback_mode - loopback modes | ||
| 15 | * @LOOPBACK_NONE: no loopback | ||
| 16 | * @LOOPBACK_XGMII: loopback within MAC at XGMII level | ||
| 17 | * @LOOPBACK_XGXS: loopback within MAC at XGXS level | ||
| 18 | * @LOOPBACK_XAUI: loopback within MAC at XAUI level | ||
| 19 | * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level | ||
| 20 | * @LOOPBACK_PCS: loopback within PHY at PCS level | ||
| 21 | * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level | ||
| 22 | * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) | ||
| 23 | */ | ||
| 24 | /* Please keep in order and up-to-date w.r.t. the following two #defines */ | ||
| 25 | enum efx_loopback_mode { | ||
| 26 | LOOPBACK_NONE = 0, | ||
| 27 | LOOPBACK_MAC = 1, | ||
| 28 | LOOPBACK_XGMII = 2, | ||
| 29 | LOOPBACK_XGXS = 3, | ||
| 30 | LOOPBACK_XAUI = 4, | ||
| 31 | LOOPBACK_PHY = 5, | ||
| 32 | LOOPBACK_PHYXS = 6, | ||
| 33 | LOOPBACK_PCS = 7, | ||
| 34 | LOOPBACK_PMAPMD = 8, | ||
| 35 | LOOPBACK_NETWORK = 9, | ||
| 36 | LOOPBACK_MAX | ||
| 37 | }; | ||
| 38 | |||
| 39 | #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD | ||
| 40 | |||
| 41 | extern const char *efx_loopback_mode_names[]; | ||
| 42 | #define LOOPBACK_MODE_NAME(mode) \ | ||
| 43 | STRING_TABLE_LOOKUP(mode, efx_loopback_mode) | ||
| 44 | #define LOOPBACK_MODE(efx) \ | ||
| 45 | LOOPBACK_MODE_NAME(efx->loopback_mode) | ||
| 46 | |||
| 47 | /* These loopbacks occur within the controller */ | ||
| 48 | #define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ | ||
| 49 | (1 << LOOPBACK_XGXS) | \ | ||
| 50 | (1 << LOOPBACK_XAUI)) | ||
| 51 | |||
| 52 | #define LOOPBACK_MASK(_efx) \ | ||
| 53 | (1 << (_efx)->loopback_mode) | ||
| 54 | |||
| 55 | #define LOOPBACK_INTERNAL(_efx) \ | ||
| 56 | ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) | ||
| 57 | |||
| 58 | #define LOOPBACK_OUT_OF(_from, _to, _mask) \ | ||
| 59 | (((LOOPBACK_MASK(_from) & (_mask)) && \ | ||
| 60 | ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0) | ||
| 61 | |||
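A quick demonstration of the new predicates, assuming the enum and macros above are in scope; `struct efx_nic` is stubbed down to the one member the macros use:

```c
#include <stdio.h>

/* Minimal stand-in for struct efx_nic, just enough for the macros. */
struct efx_nic { enum efx_loopback_mode loopback_mode; };

int main(void)
{
	struct efx_nic efx = { .loopback_mode = LOOPBACK_XAUI };

	/* 1: XAUI is in LOOPBACKS_10G_INTERNAL (inside the MAC) */
	printf("%d\n", LOOPBACK_INTERNAL(&efx));
	efx.loopback_mode = LOOPBACK_PCS;
	/* 0: PCS loopback happens in the PHY, outside the mask */
	printf("%d\n", LOOPBACK_INTERNAL(&efx));
	return 0;
}
```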
| 13 | /*****************************************************************************/ | 62 | /*****************************************************************************/ |
| 14 | 63 | ||
| 15 | /** | 64 | /** |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index ad541badbd98..e2c75d101610 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
| @@ -12,12 +12,26 @@ | |||
| 12 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
| 13 | #include <linux/rtnetlink.h> | 13 | #include <linux/rtnetlink.h> |
| 14 | #include "net_driver.h" | 14 | #include "net_driver.h" |
| 15 | #include "selftest.h" | ||
| 15 | #include "efx.h" | 16 | #include "efx.h" |
| 16 | #include "ethtool.h" | 17 | #include "ethtool.h" |
| 17 | #include "falcon.h" | 18 | #include "falcon.h" |
| 18 | #include "gmii.h" | 19 | #include "gmii.h" |
| 19 | #include "mac.h" | 20 | #include "mac.h" |
| 20 | 21 | ||
| 22 | const char *efx_loopback_mode_names[] = { | ||
| 23 | [LOOPBACK_NONE] = "NONE", | ||
| 24 | [LOOPBACK_MAC] = "MAC", | ||
| 25 | [LOOPBACK_XGMII] = "XGMII", | ||
| 26 | [LOOPBACK_XGXS] = "XGXS", | ||
| 27 | [LOOPBACK_XAUI] = "XAUI", | ||
| 28 | [LOOPBACK_PHY] = "PHY", | ||
| 29 | [LOOPBACK_PHYXS] = "PHY(XS)", | ||
| 30 | [LOOPBACK_PCS] = "PHY(PCS)", | ||
| 31 | [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", | ||
| 32 | [LOOPBACK_NETWORK] = "NETWORK", | ||
| 33 | }; | ||
| 34 | |||
| 21 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); | 35 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); |
| 22 | 36 | ||
| 23 | struct ethtool_string { | 37 | struct ethtool_string { |
| @@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, | |||
| 217 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); | 231 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); |
| 218 | } | 232 | } |
| 219 | 233 | ||
| 234 | /** | ||
| 235 | * efx_fill_test - fill in an individual self-test entry | ||
| 236 | * @test_index: Index of the test | ||
| 237 | * @strings: Ethtool strings, or %NULL | ||
| 238 | * @data: Ethtool test results, or %NULL | ||
| 239 | * @test: Pointer to test result (used only if data != %NULL) | ||
| 240 | * @unit_format: Unit name format (e.g. "channel\%d") | ||
| 241 | * @unit_id: Unit id (e.g. 0 for "channel0") | ||
| 242 | * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") | ||
| 243 | * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") | ||
| 244 | * | ||
| 245 | * Fill in an individual self-test entry. | ||
| 246 | */ | ||
| 247 | static void efx_fill_test(unsigned int test_index, | ||
| 248 | struct ethtool_string *strings, u64 *data, | ||
| 249 | int *test, const char *unit_format, int unit_id, | ||
| 250 | const char *test_format, const char *test_id) | ||
| 251 | { | ||
| 252 | struct ethtool_string unit_str, test_str; | ||
| 253 | |||
| 254 | /* Fill data value, if applicable */ | ||
| 255 | if (data) | ||
| 256 | data[test_index] = *test; | ||
| 257 | |||
| 258 | /* Fill string, if applicable */ | ||
| 259 | if (strings) { | ||
| 260 | snprintf(unit_str.name, sizeof(unit_str.name), | ||
| 261 | unit_format, unit_id); | ||
| 262 | snprintf(test_str.name, sizeof(test_str.name), | ||
| 263 | test_format, test_id); | ||
| 264 | snprintf(strings[test_index].name, | ||
| 265 | sizeof(strings[test_index].name), | ||
| 266 | "%-9s%-17s", unit_str.name, test_str.name); | ||
| 267 | } | ||
| 268 | } | ||
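The `"%-9s%-17s"` format at the end is what makes the test names line up in `ethtool -t` output: the unit name is left-justified into a 9-column field and the test name into a 17-column field, which together fit the 32-byte ethtool string (ETH_GSTRING_LEN). A small standalone illustration of that layout:

```c
#include <stdio.h>

int main(void)
{
        char name[32];

        /* Unit column padded to 9 chars, test column to 17, the same
         * layout efx_fill_test() composes for each ethtool string. */
        snprintf(name, sizeof(name), "%-9s%-17s", "channel0", "eventq.dma");
        printf("[%s]\n", name);  /* [channel0 eventq.dma       ] */

        snprintf(name, sizeof(name), "%-9s%-17s", "port0", "loopback.speed");
        printf("[%s]\n", name);  /* [port0    loopback.speed   ] */
        return 0;
}
```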
| 269 | |||
| 270 | #define EFX_PORT_NAME "port%d", 0 | ||
| 271 | #define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel | ||
| 272 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue | ||
| 273 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue | ||
| 274 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ | ||
| 275 | "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode) | ||
| 276 | |||
| 277 | /** | ||
| 278 | * efx_fill_loopback_test - fill in a block of loopback self-test entries | ||
| 279 | * @efx: Efx NIC | ||
| 280 | * @lb_tests: Efx loopback self-test results structure | ||
| 281 | * @mode: Loopback test mode | ||
| 282 | * @test_index: Starting index of the test | ||
| 283 | * @strings: Ethtool strings, or %NULL | ||
| 284 | * @data: Ethtool test results, or %NULL | ||
| 285 | */ | ||
| 286 | static int efx_fill_loopback_test(struct efx_nic *efx, | ||
| 287 | struct efx_loopback_self_tests *lb_tests, | ||
| 288 | enum efx_loopback_mode mode, | ||
| 289 | unsigned int test_index, | ||
| 290 | struct ethtool_string *strings, u64 *data) | ||
| 291 | { | ||
| 292 | struct efx_tx_queue *tx_queue; | ||
| 293 | |||
| 294 | efx_for_each_tx_queue(tx_queue, efx) { | ||
| 295 | efx_fill_test(test_index++, strings, data, | ||
| 296 | &lb_tests->tx_sent[tx_queue->queue], | ||
| 297 | EFX_TX_QUEUE_NAME(tx_queue), | ||
| 298 | EFX_LOOPBACK_NAME(mode, "tx_sent")); | ||
| 299 | efx_fill_test(test_index++, strings, data, | ||
| 300 | &lb_tests->tx_done[tx_queue->queue], | ||
| 301 | EFX_TX_QUEUE_NAME(tx_queue), | ||
| 302 | EFX_LOOPBACK_NAME(mode, "tx_done")); | ||
| 303 | } | ||
| 304 | efx_fill_test(test_index++, strings, data, | ||
| 305 | &lb_tests->rx_good, | ||
| 306 | EFX_PORT_NAME, | ||
| 307 | EFX_LOOPBACK_NAME(mode, "rx_good")); | ||
| 308 | efx_fill_test(test_index++, strings, data, | ||
| 309 | &lb_tests->rx_bad, | ||
| 310 | EFX_PORT_NAME, | ||
| 311 | EFX_LOOPBACK_NAME(mode, "rx_bad")); | ||
| 312 | |||
| 313 | return test_index; | ||
| 314 | } | ||
| 315 | |||
| 316 | /** | ||
| 317 | * efx_ethtool_fill_self_tests - get self-test details | ||
| 318 | * @efx: Efx NIC | ||
| 319 | * @tests: Efx self-test results structure, or %NULL | ||
| 320 | * @strings: Ethtool strings, or %NULL | ||
| 321 | * @data: Ethtool test results, or %NULL | ||
| 322 | */ | ||
| 323 | static int efx_ethtool_fill_self_tests(struct efx_nic *efx, | ||
| 324 | struct efx_self_tests *tests, | ||
| 325 | struct ethtool_string *strings, | ||
| 326 | u64 *data) | ||
| 327 | { | ||
| 328 | struct efx_channel *channel; | ||
| 329 | unsigned int n = 0; | ||
| 330 | enum efx_loopback_mode mode; | ||
| 331 | |||
| 332 | /* Interrupt */ | ||
| 333 | efx_fill_test(n++, strings, data, &tests->interrupt, | ||
| 334 | "core", 0, "interrupt", NULL); | ||
| 335 | |||
| 336 | /* Event queues */ | ||
| 337 | efx_for_each_channel(channel, efx) { | ||
| 338 | efx_fill_test(n++, strings, data, | ||
| 339 | &tests->eventq_dma[channel->channel], | ||
| 340 | EFX_CHANNEL_NAME(channel), | ||
| 341 | "eventq.dma", NULL); | ||
| 342 | efx_fill_test(n++, strings, data, | ||
| 343 | &tests->eventq_int[channel->channel], | ||
| 344 | EFX_CHANNEL_NAME(channel), | ||
| 345 | "eventq.int", NULL); | ||
| 346 | efx_fill_test(n++, strings, data, | ||
| 347 | &tests->eventq_poll[channel->channel], | ||
| 348 | EFX_CHANNEL_NAME(channel), | ||
| 349 | "eventq.poll", NULL); | ||
| 350 | } | ||
| 351 | |||
| 352 | /* PHY presence */ | ||
| 353 | efx_fill_test(n++, strings, data, &tests->phy_ok, | ||
| 354 | EFX_PORT_NAME, "phy_ok", NULL); | ||
| 355 | |||
| 356 | /* Loopback tests */ | ||
| 357 | efx_fill_test(n++, strings, data, &tests->loopback_speed, | ||
| 358 | EFX_PORT_NAME, "loopback.speed", NULL); | ||
| 359 | efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, | ||
| 360 | EFX_PORT_NAME, "loopback.full_duplex", NULL); | ||
| 361 | for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { | ||
| 362 | if (!(efx->loopback_modes & (1 << mode))) | ||
| 363 | continue; | ||
| 364 | n = efx_fill_loopback_test(efx, | ||
| 365 | &tests->loopback[mode], mode, n, | ||
| 366 | strings, data); | ||
| 367 | } | ||
| 368 | |||
| 369 | return n; | ||
| 370 | } | ||
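Note the pattern here: this one walker defines the test layout, and callers select behaviour by what they pass — all-%NULL arguments merely count entries, @strings alone emits the names, @data alone copies the results. A single walk guarantees the count, the strings and the results can never drift out of order. A sketch of the same idiom with hypothetical names (not the driver's code):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* One walker fills names and/or values; with both NULL it only counts. */
static int fill_tests(char (*names)[32], int64_t *vals)
{
        static const char *test_names[] = { "interrupt", "phy_ok" };
        static const int64_t results[]  = { 1, -1 };
        int n = 0;

        for (int i = 0; i < 2; i++, n++) {
                if (names)
                        strncpy(names[n], test_names[i], 32);
                if (vals)
                        vals[n] = results[i];
        }
        return n;
}

int main(void)
{
        int count = fill_tests(NULL, NULL);   /* like .self_test_count   */
        char names[8][32];
        int64_t vals[8];

        fill_tests(names, NULL);              /* like ETH_SS_TEST strings */
        fill_tests(NULL, vals);               /* like .self_test results  */
        for (int i = 0; i < count; i++)
                printf("%-12s %lld\n", names[i], (long long)vals[i]);
        return 0;
}
```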
| 371 | |||
| 220 | static int efx_ethtool_get_stats_count(struct net_device *net_dev) | 372 | static int efx_ethtool_get_stats_count(struct net_device *net_dev) |
| 221 | { | 373 | { |
| 222 | return EFX_ETHTOOL_NUM_STATS; | 374 | return EFX_ETHTOOL_NUM_STATS; |
| 223 | } | 375 | } |
| 224 | 376 | ||
| 377 | static int efx_ethtool_self_test_count(struct net_device *net_dev) | ||
| 378 | { | ||
| 379 | struct efx_nic *efx = net_dev->priv; | ||
| 380 | |||
| 381 | return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); | ||
| 382 | } | ||
| 383 | |||
| 225 | static void efx_ethtool_get_strings(struct net_device *net_dev, | 384 | static void efx_ethtool_get_strings(struct net_device *net_dev, |
| 226 | u32 string_set, u8 *strings) | 385 | u32 string_set, u8 *strings) |
| 227 | { | 386 | { |
| 387 | struct efx_nic *efx = net_dev->priv; | ||
| 228 | struct ethtool_string *ethtool_strings = | 388 | struct ethtool_string *ethtool_strings = |
| 229 | (struct ethtool_string *)strings; | 389 | (struct ethtool_string *)strings; |
| 230 | int i; | 390 | int i; |
| 231 | 391 | ||
| 232 | if (string_set == ETH_SS_STATS) | 392 | switch (string_set) { |
| 393 | case ETH_SS_STATS: | ||
| 233 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) | 394 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) |
| 234 | strncpy(ethtool_strings[i].name, | 395 | strncpy(ethtool_strings[i].name, |
| 235 | efx_ethtool_stats[i].name, | 396 | efx_ethtool_stats[i].name, |
| 236 | sizeof(ethtool_strings[i].name)); | 397 | sizeof(ethtool_strings[i].name)); |
| 398 | break; | ||
| 399 | case ETH_SS_TEST: | ||
| 400 | efx_ethtool_fill_self_tests(efx, NULL, | ||
| 401 | ethtool_strings, NULL); | ||
| 402 | break; | ||
| 403 | default: | ||
| 404 | /* No other string sets */ | ||
| 405 | break; | ||
| 406 | } | ||
| 237 | } | 407 | } |
| 238 | 408 | ||
| 239 | static void efx_ethtool_get_stats(struct net_device *net_dev, | 409 | static void efx_ethtool_get_stats(struct net_device *net_dev, |
| @@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, | |||
| 272 | } | 442 | } |
| 273 | } | 443 | } |
| 274 | 444 | ||
| 445 | static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) | ||
| 446 | { | ||
| 447 | int rc; | ||
| 448 | |||
| 449 | /* Our TSO requires TX checksumming, so force TX checksumming | ||
| 450 | * on when TSO is enabled. | ||
| 451 | */ | ||
| 452 | if (enable) { | ||
| 453 | rc = efx_ethtool_set_tx_csum(net_dev, 1); | ||
| 454 | if (rc) | ||
| 455 | return rc; | ||
| 456 | } | ||
| 457 | |||
| 458 | return ethtool_op_set_tso(net_dev, enable); | ||
| 459 | } | ||
| 460 | |||
| 275 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) | 461 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) |
| 276 | { | 462 | { |
| 277 | struct efx_nic *efx = net_dev->priv; | 463 | struct efx_nic *efx = net_dev->priv; |
| @@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) | |||
| 283 | 469 | ||
| 284 | efx_flush_queues(efx); | 470 | efx_flush_queues(efx); |
| 285 | 471 | ||
| 472 | /* Our TSO requires TX checksumming, so disable TSO when | ||
| 473 | * checksumming is disabled | ||
| 474 | */ | ||
| 475 | if (!enable) { | ||
| 476 | rc = efx_ethtool_set_tso(net_dev, 0); | ||
| 477 | if (rc) | ||
| 478 | return rc; | ||
| 479 | } | ||
| 480 | |||
| 286 | return 0; | 481 | return 0; |
| 287 | } | 482 | } |
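Between them, efx_ethtool_set_tso() and efx_ethtool_set_tx_csum() maintain one invariant: TSO may only be enabled while TX checksum offload is enabled, since the hardware must checksum the segments it generates. Enabling TSO therefore drags checksumming on, and disabling checksumming drags TSO off. A toy model of that dependency, not the driver's code:

```c
#include <assert.h>
#include <stdio.h>

struct features { int tx_csum; int tso; };

static void set_tso(struct features *f, int enable)
{
        if (enable)
                f->tx_csum = 1;   /* TSO requires TX checksum offload */
        f->tso = enable;
}

static void set_tx_csum(struct features *f, int enable)
{
        f->tx_csum = enable;
        if (!enable)
                f->tso = 0;       /* no checksums -> no TSO either */
}

int main(void)
{
        struct features f = { 0, 0 };

        set_tso(&f, 1);
        assert(f.tx_csum && f.tso);
        set_tx_csum(&f, 0);
        assert(!f.tx_csum && !f.tso);
        puts("invariant holds: tso implies tx_csum");
        return 0;
}
```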
| 288 | 483 | ||
| @@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) | |||
| 305 | return efx->rx_checksum_enabled; | 500 | return efx->rx_checksum_enabled; |
| 306 | } | 501 | } |
| 307 | 502 | ||
| 503 | static void efx_ethtool_self_test(struct net_device *net_dev, | ||
| 504 | struct ethtool_test *test, u64 *data) | ||
| 505 | { | ||
| 506 | struct efx_nic *efx = net_dev->priv; | ||
| 507 | struct efx_self_tests efx_tests; | ||
| 508 | int offline, already_up; | ||
| 509 | int rc; | ||
| 510 | |||
| 511 | ASSERT_RTNL(); | ||
| 512 | if (efx->state != STATE_RUNNING) { | ||
| 513 | rc = -EIO; | ||
| 514 | goto fail1; | ||
| 515 | } | ||
| 516 | |||
| 517 | /* We need rx buffers and interrupts. */ | ||
| 518 | already_up = (efx->net_dev->flags & IFF_UP); | ||
| 519 | if (!already_up) { | ||
| 520 | rc = dev_open(efx->net_dev); | ||
| 521 | if (rc) { | ||
| 522 | EFX_ERR(efx, "failed opening device.\n"); | ||
| 523 | goto fail2; | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 527 | memset(&efx_tests, 0, sizeof(efx_tests)); | ||
| 528 | offline = (test->flags & ETH_TEST_FL_OFFLINE); | ||
| 529 | |||
| 530 | /* Perform online self tests first */ | ||
| 531 | rc = efx_online_test(efx, &efx_tests); | ||
| 532 | if (rc) | ||
| 533 | goto out; | ||
| 534 | |||
| 535 | /* Perform offline tests only if online tests passed */ | ||
| 536 | if (offline) { | ||
| 537 | /* Stop the kernel from sending packets during the test. */ | ||
| 538 | efx_stop_queue(efx); | ||
| 539 | rc = efx_flush_queues(efx); | ||
| 540 | if (!rc) | ||
| 541 | rc = efx_offline_test(efx, &efx_tests, | ||
| 542 | efx->loopback_modes); | ||
| 543 | efx_wake_queue(efx); | ||
| 544 | } | ||
| 545 | |||
| 546 | out: | ||
| 547 | if (!already_up) | ||
| 548 | dev_close(efx->net_dev); | ||
| 549 | |||
| 550 | EFX_LOG(efx, "%s all %sline self-tests\n", | ||
| 551 | rc == 0 ? "passed" : "failed", offline ? "off" : "on"); | ||
| 552 | |||
| 553 | fail2: | ||
| 554 | fail1: | ||
| 555 | /* Fill ethtool results structures */ | ||
| 556 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | ||
| 557 | if (rc) | ||
| 558 | test->flags |= ETH_TEST_FL_FAILED; | ||
| 559 | } | ||
| 560 | |||
| 308 | /* Restart autonegotiation */ | 561 | /* Restart autonegotiation */ |
| 309 | static int efx_ethtool_nway_reset(struct net_device *net_dev) | 562 | static int efx_ethtool_nway_reset(struct net_device *net_dev) |
| 310 | { | 563 | { |
| @@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = { | |||
| 451 | .set_tx_csum = efx_ethtool_set_tx_csum, | 704 | .set_tx_csum = efx_ethtool_set_tx_csum, |
| 452 | .get_sg = ethtool_op_get_sg, | 705 | .get_sg = ethtool_op_get_sg, |
| 453 | .set_sg = ethtool_op_set_sg, | 706 | .set_sg = ethtool_op_set_sg, |
| 707 | .get_tso = ethtool_op_get_tso, | ||
| 708 | .set_tso = efx_ethtool_set_tso, | ||
| 454 | .get_flags = ethtool_op_get_flags, | 709 | .get_flags = ethtool_op_get_flags, |
| 455 | .set_flags = ethtool_op_set_flags, | 710 | .set_flags = ethtool_op_set_flags, |
| 711 | .self_test_count = efx_ethtool_self_test_count, | ||
| 712 | .self_test = efx_ethtool_self_test, | ||
| 456 | .get_strings = efx_ethtool_get_strings, | 713 | .get_strings = efx_ethtool_get_strings, |
| 457 | .phys_id = efx_ethtool_phys_id, | 714 | .phys_id = efx_ethtool_phys_id, |
| 458 | .get_stats_count = efx_ethtool_get_stats_count, | 715 | .get_stats_count = efx_ethtool_get_stats_count, |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 46db549ce580..b57cc68058c0 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
| @@ -1129,6 +1129,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel, | |||
| 1129 | case RX_RECOVERY_EV_DECODE: | 1129 | case RX_RECOVERY_EV_DECODE: |
| 1130 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | 1130 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " |
| 1131 | "Resetting.\n", channel->channel); | 1131 | "Resetting.\n", channel->channel); |
| 1132 | atomic_inc(&efx->rx_reset); | ||
| 1132 | efx_schedule_reset(efx, | 1133 | efx_schedule_reset(efx, |
| 1133 | EFX_WORKAROUND_6555(efx) ? | 1134 | EFX_WORKAROUND_6555(efx) ? |
| 1134 | RESET_TYPE_RX_RECOVERY : | 1135 | RESET_TYPE_RX_RECOVERY : |
| @@ -1731,7 +1732,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) | |||
| 1731 | efx_oword_t temp; | 1732 | efx_oword_t temp; |
| 1732 | int count; | 1733 | int count; |
| 1733 | 1734 | ||
| 1734 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1735 | if ((FALCON_REV(efx) < FALCON_REV_B0) || |
| 1736 | (efx->loopback_mode != LOOPBACK_NONE)) | ||
| 1735 | return; | 1737 | return; |
| 1736 | 1738 | ||
| 1737 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | 1739 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); |
| @@ -2091,6 +2093,8 @@ static int falcon_probe_phy(struct efx_nic *efx) | |||
| 2091 | efx->phy_type); | 2093 | efx->phy_type); |
| 2092 | return -1; | 2094 | return -1; |
| 2093 | } | 2095 | } |
| 2096 | |||
| 2097 | efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; | ||
| 2094 | return 0; | 2098 | return 0; |
| 2095 | } | 2099 | } |
| 2096 | 2100 | ||
| @@ -2468,14 +2472,12 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
| 2468 | fail5: | 2472 | fail5: |
| 2469 | falcon_free_buffer(efx, &efx->irq_status); | 2473 | falcon_free_buffer(efx, &efx->irq_status); |
| 2470 | fail4: | 2474 | fail4: |
| 2471 | /* fall-thru */ | ||
| 2472 | fail3: | 2475 | fail3: |
| 2473 | if (nic_data->pci_dev2) { | 2476 | if (nic_data->pci_dev2) { |
| 2474 | pci_dev_put(nic_data->pci_dev2); | 2477 | pci_dev_put(nic_data->pci_dev2); |
| 2475 | nic_data->pci_dev2 = NULL; | 2478 | nic_data->pci_dev2 = NULL; |
| 2476 | } | 2479 | } |
| 2477 | fail2: | 2480 | fail2: |
| 2478 | /* fall-thru */ | ||
| 2479 | fail1: | 2481 | fail1: |
| 2480 | kfree(efx->nic_data); | 2482 | kfree(efx->nic_data); |
| 2481 | return rc; | 2483 | return rc; |
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 0485a63eaff6..06e2d68fc3d1 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
| @@ -636,6 +636,14 @@ | |||
| 636 | #define XX_HIDRVA_WIDTH 1 | 636 | #define XX_HIDRVA_WIDTH 1 |
| 637 | #define XX_LODRVA_LBN 8 | 637 | #define XX_LODRVA_LBN 8 |
| 638 | #define XX_LODRVA_WIDTH 1 | 638 | #define XX_LODRVA_WIDTH 1 |
| 639 | #define XX_LPBKD_LBN 3 | ||
| 640 | #define XX_LPBKD_WIDTH 1 | ||
| 641 | #define XX_LPBKC_LBN 2 | ||
| 642 | #define XX_LPBKC_WIDTH 1 | ||
| 643 | #define XX_LPBKB_LBN 1 | ||
| 644 | #define XX_LPBKB_WIDTH 1 | ||
| 645 | #define XX_LPBKA_LBN 0 | ||
| 646 | #define XX_LPBKA_WIDTH 1 | ||
| 639 | 647 | ||
| 640 | #define XX_TXDRV_CTL_REG_MAC 0x12 | 648 | #define XX_TXDRV_CTL_REG_MAC 0x12 |
| 641 | #define XX_DEQD_LBN 28 | 649 | #define XX_DEQD_LBN 28 |
| @@ -656,8 +664,14 @@ | |||
| 656 | #define XX_DTXA_WIDTH 4 | 664 | #define XX_DTXA_WIDTH 4 |
| 657 | 665 | ||
| 658 | /* XAUI XGXS core status register */ | 666 | /* XAUI XGXS core status register */ |
| 659 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
| 660 | #define XX_CORE_STAT_REG_MAC 0x16 | 667 | #define XX_CORE_STAT_REG_MAC 0x16 |
| 668 | #define XX_FORCE_SIG_LBN 24 | ||
| 669 | #define XX_FORCE_SIG_WIDTH 8 | ||
| 670 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
| 671 | #define XX_XGXS_LB_EN_LBN 23 | ||
| 672 | #define XX_XGXS_LB_EN_WIDTH 1 | ||
| 673 | #define XX_XGMII_LB_EN_LBN 22 | ||
| 674 | #define XX_XGMII_LB_EN_WIDTH 1 | ||
| 661 | #define XX_ALIGN_DONE_LBN 20 | 675 | #define XX_ALIGN_DONE_LBN 20 |
| 662 | #define XX_ALIGN_DONE_WIDTH 1 | 676 | #define XX_ALIGN_DONE_WIDTH 1 |
| 663 | #define XX_SYNC_STAT_LBN 16 | 677 | #define XX_SYNC_STAT_LBN 16 |
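These definitions follow the driver's LBN/WIDTH convention: every register field is described by its lowest bit number (LBN) and its width in bits, from which the EFX_*_FIELD macros derive a shift and a mask. So XX_XGXS_LB_EN occupies bit 23 of XX_CORE_STAT_REG_MAC and XX_FORCE_SIG bits 31:24. A simplified userspace version of the read-modify-write this enables (the real EFX macros in the driver are more general than this sketch):

```c
#include <stdio.h>
#include <stdint.h>

#define XX_FORCE_SIG_LBN    24
#define XX_FORCE_SIG_WIDTH  8
#define XX_XGXS_LB_EN_LBN   23
#define XX_XGXS_LB_EN_WIDTH 1

/* Simplified stand-ins for what EFX_SET_DWORD_FIELD-style macros do */
#define FIELD_MASK(w)             ((w) == 32 ? 0xffffffffu : ((1u << (w)) - 1u))
#define SET_FIELD(reg, lbn, w, v) \
        (((reg) & ~(FIELD_MASK(w) << (lbn))) | \
         (((uint32_t)(v) & FIELD_MASK(w)) << (lbn)))

int main(void)
{
        uint32_t reg = 0;

        reg = SET_FIELD(reg, XX_FORCE_SIG_LBN, XX_FORCE_SIG_WIDTH, 0xff);
        reg = SET_FIELD(reg, XX_XGXS_LB_EN_LBN, XX_XGXS_LB_EN_WIDTH, 1);
        printf("reg = 0x%08x\n", reg);   /* 0xff800000 */
        return 0;
}
```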
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index aa7521b24a5d..a74b7931a3c4 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) | 32 | (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) |
| 33 | 33 | ||
| 34 | void falcon_xmac_writel(struct efx_nic *efx, | 34 | void falcon_xmac_writel(struct efx_nic *efx, |
| 35 | efx_dword_t *value, unsigned int mac_reg) | 35 | efx_dword_t *value, unsigned int mac_reg) |
| 36 | { | 36 | { |
| 37 | efx_oword_t temp; | 37 | efx_oword_t temp; |
| 38 | 38 | ||
| @@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx) | |||
| 69 | udelay(10); | 69 | udelay(10); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | /* This often fails when the DSP is disabled; ignore it */ | ||
| 73 | if (sfe4001_phy_flash_cfg != 0) | ||
| 74 | return 0; | ||
| 75 | |||
| 72 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); | 76 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); |
| 73 | return -ETIMEDOUT; | 77 | return -ETIMEDOUT; |
| 74 | } | 78 | } |
| @@ -223,7 +227,7 @@ static int falcon_xgmii_status(struct efx_nic *efx) | |||
| 223 | /* The ISR latches, so clear it and re-read */ | 227 | /* The ISR latches, so clear it and re-read */ |
| 224 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | 228 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); |
| 225 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | 229 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); |
| 226 | 230 | ||
| 227 | if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || | 231 | if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || |
| 228 | EFX_DWORD_FIELD(reg, XM_RMTFLT)) { | 232 | EFX_DWORD_FIELD(reg, XM_RMTFLT)) { |
| 229 | EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); | 233 | EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); |
| @@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | |||
| 237 | { | 241 | { |
| 238 | efx_dword_t reg; | 242 | efx_dword_t reg; |
| 239 | 243 | ||
| 240 | if (FALCON_REV(efx) < FALCON_REV_B0) | 244 | if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) |
| 241 | return; | 245 | return; |
| 242 | 246 | ||
| 243 | /* Flush the ISR */ | 247 | /* Flush the ISR */ |
| @@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx) | |||
| 284 | efx_dword_t reg; | 288 | efx_dword_t reg; |
| 285 | int align_done, sync_status, link_ok = 0; | 289 | int align_done, sync_status, link_ok = 0; |
| 286 | 290 | ||
| 291 | if (LOOPBACK_INTERNAL(efx)) | ||
| 292 | return 1; | ||
| 293 | |||
| 287 | /* Read link status */ | 294 | /* Read link status */ |
| 288 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | 295 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); |
| 289 | 296 | ||
| @@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx) | |||
| 374 | falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); | 381 | falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); |
| 375 | } | 382 | } |
| 376 | 383 | ||
| 384 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | ||
| 385 | { | ||
| 386 | efx_dword_t reg; | ||
| 387 | int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; | ||
| 388 | int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; | ||
| 389 | int xgmii_loopback = | ||
| 390 | (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0; | ||
| 391 | |||
| 392 | /* The XGXS block is flaky and will need to be reset if moving | ||
| 393 | * into or out of XGMII, XGXS or XAUI loopbacks. */ | ||
| 394 | if (EFX_WORKAROUND_5147(efx)) { | ||
| 395 | int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; | ||
| 396 | int reset_xgxs; | ||
| 397 | |||
| 398 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | ||
| 399 | old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); | ||
| 400 | old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); | ||
| 401 | |||
| 402 | falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); | ||
| 403 | old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); | ||
| 404 | |||
| 405 | /* The PHY driver may have turned XAUI off */ | ||
| 406 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || | ||
| 407 | (xaui_loopback != old_xaui_loopback) || | ||
| 408 | (xgmii_loopback != old_xgmii_loopback)); | ||
| 409 | if (reset_xgxs) { | ||
| 410 | falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 411 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); | ||
| 412 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); | ||
| 413 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 414 | udelay(1); | ||
| 415 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); | ||
| 416 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); | ||
| 417 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 418 | udelay(1); | ||
| 419 | } | ||
| 420 | } | ||
| 421 | |||
| 422 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | ||
| 423 | EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, | ||
| 424 | (xgxs_loopback || xaui_loopback) ? | ||
| 425 | XX_FORCE_SIG_DECODE_FORCED : 0); | ||
| 426 | EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); | ||
| 427 | EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); | ||
| 428 | falcon_xmac_writel(efx, ®, XX_CORE_STAT_REG_MAC); | ||
| 429 | |||
| 430 | falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); | ||
| 431 | EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); | ||
| 432 | EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); | ||
| 433 | EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); | ||
| 434 | EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); | ||
| 435 | falcon_xmac_writel(efx, ®, XX_SD_CTL_REG_MAC); | ||
| 436 | } | ||
| 437 | |||
| 438 | |||
| 377 | /* Try to bring the Falcon side of the Falcon-PHY XAUI link back up if it | 439 | /* Try to bring the Falcon side of the Falcon-PHY XAUI link back up if it
| 378 | * fails to come up. Bash it until it comes back up */ | 440 | * fails to come up. Bash it until it comes back up */
| 379 | static int falcon_check_xaui_link_up(struct efx_nic *efx) | 441 | static int falcon_check_xaui_link_up(struct efx_nic *efx) |
| @@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
| 382 | tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; | 444 | tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; |
| 383 | max_tries = tries; | 445 | max_tries = tries; |
| 384 | 446 | ||
| 385 | if (efx->phy_type == PHY_TYPE_NONE) | 447 | if ((efx->loopback_mode == LOOPBACK_NETWORK) || |
| 448 | (efx->phy_type == PHY_TYPE_NONE)) | ||
| 386 | return 0; | 449 | return 0; |
| 387 | 450 | ||
| 388 | while (tries) { | 451 | while (tries) { |
| @@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx) | |||
| 408 | falcon_mask_status_intr(efx, 0); | 471 | falcon_mask_status_intr(efx, 0); |
| 409 | 472 | ||
| 410 | falcon_deconfigure_mac_wrapper(efx); | 473 | falcon_deconfigure_mac_wrapper(efx); |
| 474 | |||
| 475 | efx->tx_disabled = LOOPBACK_INTERNAL(efx); | ||
| 411 | efx->phy_op->reconfigure(efx); | 476 | efx->phy_op->reconfigure(efx); |
| 477 | |||
| 478 | falcon_reconfigure_xgxs_core(efx); | ||
| 412 | falcon_reconfigure_xmac_core(efx); | 479 | falcon_reconfigure_xmac_core(efx); |
| 480 | |||
| 413 | falcon_reconfigure_mac_wrapper(efx); | 481 | falcon_reconfigure_mac_wrapper(efx); |
| 414 | 482 | ||
| 415 | /* Ensure XAUI link is up */ | 483 | /* Ensure XAUI link is up */ |
| @@ -491,13 +559,15 @@ void falcon_update_stats_xmac(struct efx_nic *efx) | |||
| 491 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes); | 559 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes); |
| 492 | } | 560 | } |
| 493 | 561 | ||
| 494 | #define EFX_XAUI_RETRAIN_MAX 8 | ||
| 495 | |||
| 496 | int falcon_check_xmac(struct efx_nic *efx) | 562 | int falcon_check_xmac(struct efx_nic *efx) |
| 497 | { | 563 | { |
| 498 | unsigned xaui_link_ok; | 564 | unsigned xaui_link_ok; |
| 499 | int rc; | 565 | int rc; |
| 500 | 566 | ||
| 567 | if ((efx->loopback_mode == LOOPBACK_NETWORK) || | ||
| 568 | (efx->phy_type == PHY_TYPE_NONE)) | ||
| 569 | return 0; | ||
| 570 | |||
| 501 | falcon_mask_status_intr(efx, 0); | 571 | falcon_mask_status_intr(efx, 0); |
| 502 | xaui_link_ok = falcon_xaui_link_ok(efx); | 572 | xaui_link_ok = falcon_xaui_link_ok(efx); |
| 503 | 573 | ||
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index dc06bb0aa575..c4f540e93b79 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c | |||
| @@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd, | |||
| 44 | int status; | 44 | int status; |
| 45 | int phy_id = efx->mii.phy_id; | 45 | int phy_id = efx->mii.phy_id; |
| 46 | 46 | ||
| 47 | if (LOOPBACK_INTERNAL(efx)) | ||
| 48 | return 0; | ||
| 49 | |||
| 47 | /* Read MMD STATUS2 to check it is responding. */ | 50 | /* Read MMD STATUS2 to check it is responding. */ |
| 48 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); | 51 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); |
| 49 | if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & | 52 | if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & |
| @@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
| 164 | int mmd = 0; | 167 | int mmd = 0; |
| 165 | int good; | 168 | int good; |
| 166 | 169 | ||
| 170 | /* If the port is in loopback, then we should only consider a subset | ||
| 171 | * of MMDs */ | ||
| 172 | if (LOOPBACK_INTERNAL(efx)) | ||
| 173 | return 1; | ||
| 174 | else if (efx->loopback_mode == LOOPBACK_NETWORK) | ||
| 175 | return 0; | ||
| 176 | else if (efx->loopback_mode == LOOPBACK_PHYXS) | ||
| 177 | mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | | ||
| 178 | MDIO_MMDREG_DEVS0_PCS | | ||
| 179 | MDIO_MMDREG_DEVS0_PMAPMD); | ||
| 180 | else if (efx->loopback_mode == LOOPBACK_PCS) | ||
| 181 | mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | | ||
| 182 | MDIO_MMDREG_DEVS0_PMAPMD); | ||
| 183 | else if (efx->loopback_mode == LOOPBACK_PMAPMD) | ||
| 184 | mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; | ||
| 185 | |||
| 167 | while (mmd_mask) { | 186 | while (mmd_mask) { |
| 168 | if (mmd_mask & 1) { | 187 | if (mmd_mask & 1) { |
| 169 | /* Double reads because link state is latched, and a | 188 | /* Double reads because link state is latched, and a |
| @@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
| 182 | return ok; | 201 | return ok; |
| 183 | } | 202 | } |
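The net effect of the checks added above: a MAC-internal loopback short-circuits to "link up" because the PHY is out of the data path entirely; LOOPBACK_NETWORK reports "down" because the loopback point lies beyond the local MMD chain; and a PHY-internal loopback drops from @mmd_mask every MMD at or below the loopback point, since nothing past it carries signal. A worked sketch with made-up DEVS0 bit positions (the real values are the MDIO_MMDREG_DEVS0_* constants in mdio_10g.h):

```c
#include <stdio.h>

/* Hypothetical bit positions, for illustration only */
#define DEVS0_PMAPMD (1u << 1)
#define DEVS0_PCS    (1u << 3)
#define DEVS0_PHYXS  (1u << 4)

int main(void)
{
        unsigned mmd_mask = DEVS0_PMAPMD | DEVS0_PCS | DEVS0_PHYXS;

        /* LOOPBACK_PCS: the loop closes inside the PCS, so neither the
         * PCS itself nor the PMA/PMD below it carries a usable link. */
        mmd_mask &= ~(DEVS0_PCS | DEVS0_PMAPMD);

        printf("mask now 0x%x: only the PHY XS is still checked\n", mmd_mask);
        return 0;
}
```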
| 184 | 203 | ||
| 204 | void mdio_clause45_transmit_disable(struct efx_nic *efx) | ||
| 205 | { | ||
| 206 | int phy_id = efx->mii.phy_id; | ||
| 207 | int ctrl1, ctrl2; | ||
| 208 | |||
| 209 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 210 | MDIO_MMDREG_TXDIS); | ||
| 211 | if (efx->tx_disabled) | ||
| 212 | ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); | ||
| 213 | else | ||
| 214 | ctrl2 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); | ||
| 215 | if (ctrl1 != ctrl2) | ||
| 216 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 217 | MDIO_MMDREG_TXDIS, ctrl2); | ||
| 218 | } | ||
| 219 | |||
| 220 | void mdio_clause45_phy_reconfigure(struct efx_nic *efx) | ||
| 221 | { | ||
| 222 | int phy_id = efx->mii.phy_id; | ||
| 223 | int ctrl1, ctrl2; | ||
| 224 | |||
| 225 | /* Handle (with debouncing) PMA/PMD loopback */ | ||
| 226 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 227 | MDIO_MMDREG_CTRL1); | ||
| 228 | |||
| 229 | if (efx->loopback_mode == LOOPBACK_PMAPMD) | ||
| 230 | ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); | ||
| 231 | else | ||
| 232 | ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); | ||
| 233 | |||
| 234 | if (ctrl1 != ctrl2) | ||
| 235 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 236 | MDIO_MMDREG_CTRL1, ctrl2); | ||
| 237 | |||
| 238 | /* Handle (with debouncing) PCS loopback */ | ||
| 239 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, | ||
| 240 | MDIO_MMDREG_CTRL1); | ||
| 241 | if (efx->loopback_mode == LOOPBACK_PCS) | ||
| 242 | ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
| 243 | else | ||
| 244 | ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
| 245 | |||
| 246 | if (ctrl1 != ctrl2) | ||
| 247 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, | ||
| 248 | MDIO_MMDREG_CTRL1, ctrl2); | ||
| 249 | |||
| 250 | /* Handle (with debouncing) PHYXS network loopback */ | ||
| 251 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
| 252 | MDIO_MMDREG_CTRL1); | ||
| 253 | if (efx->loopback_mode == LOOPBACK_NETWORK) | ||
| 254 | ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
| 255 | else | ||
| 256 | ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
| 257 | |||
| 258 | if (ctrl1 != ctrl2) | ||
| 259 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, | ||
| 260 | MDIO_MMDREG_CTRL1, ctrl2); | ||
| 261 | } | ||
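All the blocks above share one idiom: read the control register into both ctrl1 and ctrl2, adjust only ctrl2, and write back only if the two now differ — "debouncing" the slow MDIO bus so an unchanged reconfiguration costs a read, not a write. The pattern in isolation, with a toy register file standing in for the MDIO accessors (hypothetical helpers, not the driver's API):

```c
#include <stdio.h>

static int regs[32];
static int mdio_read(int reg) { return regs[reg]; }
static void mdio_write(int reg, int val)
{
        regs[reg] = val;
        printf("write reg %d <- 0x%x\n", reg, val);   /* the slow path */
}

/* Set or clear one bit, touching the bus only on a real change */
static void set_bit_debounced(int reg, int bit, int on)
{
        int old = mdio_read(reg);
        int cur = on ? (old | (1 << bit)) : (old & ~(1 << bit));

        if (cur != old)
                mdio_write(reg, cur);
}

int main(void)
{
        set_bit_debounced(0, 14, 1);  /* writes */
        set_bit_debounced(0, 14, 1);  /* no-op: bit already set */
        set_bit_debounced(0, 14, 0);  /* writes */
        return 0;
}
```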
| 262 | |||
| 185 | /** | 263 | /** |
| 186 | * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. | 264 | * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. |
| 187 | * @efx: Efx NIC | 265 | * @efx: Efx NIC |
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 2214b6d820a7..cb99f3f4491c 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h | |||
| @@ -44,11 +44,16 @@ | |||
| 44 | #define MDIO_MMDREG_DEVS1 (6) | 44 | #define MDIO_MMDREG_DEVS1 (6) |
| 45 | #define MDIO_MMDREG_CTRL2 (7) | 45 | #define MDIO_MMDREG_CTRL2 (7) |
| 46 | #define MDIO_MMDREG_STAT2 (8) | 46 | #define MDIO_MMDREG_STAT2 (8) |
| 47 | #define MDIO_MMDREG_TXDIS (9) | ||
| 47 | 48 | ||
| 48 | /* Bits in MMDREG_CTRL1 */ | 49 | /* Bits in MMDREG_CTRL1 */ |
| 49 | /* Reset */ | 50 | /* Reset */ |
| 50 | #define MDIO_MMDREG_CTRL1_RESET_LBN (15) | 51 | #define MDIO_MMDREG_CTRL1_RESET_LBN (15) |
| 51 | #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) | 52 | #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) |
| 53 | /* Loopback */ | ||
| 54 | /* Loopback bit for WIS, PCS, PHYXS and DTEXS */ | ||
| 55 | #define MDIO_MMDREG_CTRL1_LBACK_LBN (14) | ||
| 56 | #define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) | ||
| 52 | 57 | ||
| 53 | /* Bits in MMDREG_STAT1 */ | 58 | /* Bits in MMDREG_STAT1 */ |
| 54 | #define MDIO_MMDREG_STAT1_FAULT_LBN (7) | 59 | #define MDIO_MMDREG_STAT1_FAULT_LBN (7) |
| @@ -56,6 +61,9 @@ | |||
| 56 | /* Link state */ | 61 | /* Link state */ |
| 57 | #define MDIO_MMDREG_STAT1_LINK_LBN (2) | 62 | #define MDIO_MMDREG_STAT1_LINK_LBN (2) |
| 58 | #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) | 63 | #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) |
| 64 | /* Low power ability */ | ||
| 65 | #define MDIO_MMDREG_STAT1_LPABLE_LBN (1) | ||
| 66 | #define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) | ||
| 59 | 67 | ||
| 60 | /* Bits in ID reg */ | 68 | /* Bits in ID reg */ |
| 61 | #define MDIO_ID_REV(_id32) (_id32 & 0xf) | 69 | #define MDIO_ID_REV(_id32) (_id32 & 0xf) |
| @@ -76,6 +84,14 @@ | |||
| 76 | #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) | 84 | #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) |
| 77 | #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) | 85 | #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) |
| 78 | 86 | ||
| 87 | /* Bits in MMDREG_TXDIS */ | ||
| 88 | #define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) | ||
| 89 | #define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) | ||
| 90 | |||
| 91 | /* MMD-specific bits, ordered by MMD, then register */ | ||
| 92 | #define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) | ||
| 93 | #define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) | ||
| 94 | |||
| 79 | /* PMA type (4 bits) */ | 95 | /* PMA type (4 bits) */ |
| 80 | #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) | 96 | #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) |
| 81 | #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) | 97 | #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) |
| @@ -95,7 +111,7 @@ | |||
| 95 | #define MDIO_PMAPMD_CTRL2_10_BT (0xf) | 111 | #define MDIO_PMAPMD_CTRL2_10_BT (0xf) |
| 96 | #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) | 112 | #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) |
| 97 | 113 | ||
| 98 | /* /\* PHY XGXS lane state *\/ */ | 114 | /* PHY XGXS lane state */ |
| 99 | #define MDIO_PHYXS_LANE_STATE (0x18) | 115 | #define MDIO_PHYXS_LANE_STATE (0x18) |
| 100 | #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) | 116 | #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) |
| 101 | 117 | ||
| @@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx, | |||
| 217 | extern int mdio_clause45_links_ok(struct efx_nic *efx, | 233 | extern int mdio_clause45_links_ok(struct efx_nic *efx, |
| 218 | unsigned int mmd_mask); | 234 | unsigned int mmd_mask); |
| 219 | 235 | ||
| 236 | /* Generic transmit disable support through PMAPMD */ | ||
| 237 | extern void mdio_clause45_transmit_disable(struct efx_nic *efx); | ||
| 238 | |||
| 239 | /* Generic part of reconfigure: set/clear loopback bits */ | ||
| 240 | extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); | ||
| 241 | |||
| 220 | /* Read (some of) the PHY settings over MDIO */ | 242 | /* Read (some of) the PHY settings over MDIO */ |
| 221 | extern void mdio_clause45_get_settings(struct efx_nic *efx, | 243 | extern void mdio_clause45_get_settings(struct efx_nic *efx, |
| 222 | struct ethtool_cmd *ecmd); | 244 | struct ethtool_cmd *ecmd); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index c505482c2520..59f261b4171f 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
| @@ -134,6 +134,8 @@ struct efx_special_buffer { | |||
| 134 | * Set only on the final fragment of a packet; %NULL for all other | 134 | * Set only on the final fragment of a packet; %NULL for all other |
| 135 | * fragments. When this fragment completes, then we can free this | 135 | * fragments. When this fragment completes, then we can free this |
| 136 | * skb. | 136 | * skb. |
| 137 | * @tsoh: The associated TSO header structure, or %NULL if this | ||
| 138 | * buffer is not a TSO header. | ||
| 137 | * @dma_addr: DMA address of the fragment. | 139 | * @dma_addr: DMA address of the fragment. |
| 138 | * @len: Length of this fragment. | 140 | * @len: Length of this fragment. |
| 139 | * This field is zero when the queue slot is empty. | 141 | * This field is zero when the queue slot is empty. |
| @@ -144,6 +146,7 @@ struct efx_special_buffer { | |||
| 144 | */ | 146 | */ |
| 145 | struct efx_tx_buffer { | 147 | struct efx_tx_buffer { |
| 146 | const struct sk_buff *skb; | 148 | const struct sk_buff *skb; |
| 149 | struct efx_tso_header *tsoh; | ||
| 147 | dma_addr_t dma_addr; | 150 | dma_addr_t dma_addr; |
| 148 | unsigned short len; | 151 | unsigned short len; |
| 149 | unsigned char continuation; | 152 | unsigned char continuation; |
| @@ -187,6 +190,13 @@ struct efx_tx_buffer { | |||
| 187 | * variable indicates that the queue is full. This is to | 190 | * variable indicates that the queue is full. This is to |
| 188 | * avoid cache-line ping-pong between the xmit path and the | 191 | * avoid cache-line ping-pong between the xmit path and the |
| 189 | * completion path. | 192 | * completion path. |
| 193 | * @tso_headers_free: A list of TSO headers allocated for this TX queue | ||
| 194 | * that are not in use, and so available for new TSO sends. The list | ||
| 195 | * is protected by the TX queue lock. | ||
| 196 | * @tso_bursts: Number of times TSO xmit invoked by kernel | ||
| 197 | * @tso_long_headers: Number of packets with headers too long for standard | ||
| 198 | * blocks | ||
| 199 | * @tso_packets: Number of packets via the TSO xmit path | ||
| 190 | */ | 200 | */ |
| 191 | struct efx_tx_queue { | 201 | struct efx_tx_queue { |
| 192 | /* Members which don't change on the fast path */ | 202 | /* Members which don't change on the fast path */ |
| @@ -206,6 +216,10 @@ struct efx_tx_queue { | |||
| 206 | unsigned int insert_count ____cacheline_aligned_in_smp; | 216 | unsigned int insert_count ____cacheline_aligned_in_smp; |
| 207 | unsigned int write_count; | 217 | unsigned int write_count; |
| 208 | unsigned int old_read_count; | 218 | unsigned int old_read_count; |
| 219 | struct efx_tso_header *tso_headers_free; | ||
| 220 | unsigned int tso_bursts; | ||
| 221 | unsigned int tso_long_headers; | ||
| 222 | unsigned int tso_packets; | ||
| 209 | }; | 223 | }; |
| 210 | 224 | ||
| 211 | /** | 225 | /** |
| @@ -434,6 +448,9 @@ struct efx_board { | |||
| 434 | struct efx_blinker blinker; | 448 | struct efx_blinker blinker; |
| 435 | }; | 449 | }; |
| 436 | 450 | ||
| 451 | #define STRING_TABLE_LOOKUP(val, member) \ | ||
| 452 | member ## _names[val] | ||
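STRING_TABLE_LOOKUP works by token pasting: the second argument names a table by convention (`<member>_names[]`), so the macro can map an enum value to a string array defined elsewhere. A tiny sketch of the expansion:

```c
#include <stdio.h>

#define STRING_TABLE_LOOKUP(val, member) \
        member ## _names[val]

enum fruit { APPLE, PEAR };
static const char *fruit_names[] = { "apple", "pear" };

int main(void)
{
        /* Expands to fruit_names[PEAR] */
        printf("%s\n", STRING_TABLE_LOOKUP(PEAR, fruit));
        return 0;
}
```

In the driver, STRING_TABLE_LOOKUP(mode, efx_loopback_mode) expands the same way to efx_loopback_mode_names[mode], the table added to ethtool.c above.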
| 453 | |||
| 437 | enum efx_int_mode { | 454 | enum efx_int_mode { |
| 438 | /* Be careful if altering to correct macro below */ | 455 | /* Be careful if altering to correct macro below */ |
| 439 | EFX_INT_MODE_MSIX = 0, | 456 | EFX_INT_MODE_MSIX = 0, |
| @@ -506,6 +523,7 @@ enum efx_fc_type { | |||
| 506 | * @check_hw: Check hardware | 523 | * @check_hw: Check hardware |
| 507 | * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) | 524 | * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) |
| 508 | * @mmds: MMD presence mask | 525 | * @mmds: MMD presence mask |
| 526 | * @loopbacks: Supported loopback modes mask | ||
| 509 | */ | 527 | */ |
| 510 | struct efx_phy_operations { | 528 | struct efx_phy_operations { |
| 511 | int (*init) (struct efx_nic *efx); | 529 | int (*init) (struct efx_nic *efx); |
| @@ -515,6 +533,7 @@ struct efx_phy_operations { | |||
| 515 | int (*check_hw) (struct efx_nic *efx); | 533 | int (*check_hw) (struct efx_nic *efx); |
| 516 | void (*reset_xaui) (struct efx_nic *efx); | 534 | void (*reset_xaui) (struct efx_nic *efx); |
| 517 | int mmds; | 535 | int mmds; |
| 536 | unsigned loopbacks; | ||
| 518 | }; | 537 | }; |
| 519 | 538 | ||
| 520 | /* | 539 | /* |
| @@ -653,7 +672,6 @@ union efx_multicast_hash { | |||
| 653 | * @phy_op: PHY interface | 672 | * @phy_op: PHY interface |
| 654 | * @phy_data: PHY private data (including PHY-specific stats) | 673 | * @phy_data: PHY private data (including PHY-specific stats) |
| 655 | * @mii: PHY interface | 674 | * @mii: PHY interface |
| 656 | * @phy_powered: PHY power state | ||
| 657 | * @tx_disabled: PHY transmitter turned off | 675 | * @tx_disabled: PHY transmitter turned off |
| 658 | * @link_up: Link status | 676 | * @link_up: Link status |
| 659 | * @link_options: Link options (MII/GMII format) | 677 | * @link_options: Link options (MII/GMII format) |
| @@ -662,6 +680,9 @@ union efx_multicast_hash { | |||
| 662 | * @multicast_hash: Multicast hash table | 680 | * @multicast_hash: Multicast hash table |
| 663 | * @flow_control: Flow control flags - separate RX/TX so can't use link_options | 681 | * @flow_control: Flow control flags - separate RX/TX so can't use link_options |
| 664 | * @reconfigure_work: work item for dealing with PHY events | 682 | * @reconfigure_work: work item for dealing with PHY events |
| 683 | * @loopback_mode: Loopback status | ||
| 684 | * @loopback_modes: Supported loopback mode bitmask | ||
| 685 | * @loopback_selftest: Offline self-test private state | ||
| 665 | * | 686 | * |
| 666 | * The @priv field of the corresponding &struct net_device points to | 687 | * The @priv field of the corresponding &struct net_device points to |
| 667 | * this. | 688 | * this. |
| @@ -721,6 +742,7 @@ struct efx_nic { | |||
| 721 | struct efx_phy_operations *phy_op; | 742 | struct efx_phy_operations *phy_op; |
| 722 | void *phy_data; | 743 | void *phy_data; |
| 723 | struct mii_if_info mii; | 744 | struct mii_if_info mii; |
| 745 | unsigned tx_disabled; | ||
| 724 | 746 | ||
| 725 | int link_up; | 747 | int link_up; |
| 726 | unsigned int link_options; | 748 | unsigned int link_options; |
| @@ -732,6 +754,10 @@ struct efx_nic { | |||
| 732 | struct work_struct reconfigure_work; | 754 | struct work_struct reconfigure_work; |
| 733 | 755 | ||
| 734 | atomic_t rx_reset; | 756 | atomic_t rx_reset; |
| 757 | enum efx_loopback_mode loopback_mode; | ||
| 758 | unsigned int loopback_modes; | ||
| 759 | |||
| 760 | void *loopback_selftest; | ||
| 735 | }; | 761 | }; |
| 736 | 762 | ||
| 737 | /** | 763 | /** |
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 551299b462ae..670622373ddf 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include "rx.h" | 19 | #include "rx.h" |
| 20 | #include "efx.h" | 20 | #include "efx.h" |
| 21 | #include "falcon.h" | 21 | #include "falcon.h" |
| 22 | #include "selftest.h" | ||
| 22 | #include "workarounds.h" | 23 | #include "workarounds.h" |
| 23 | 24 | ||
| 24 | /* Number of RX descriptors pushed at once. */ | 25 | /* Number of RX descriptors pushed at once. */ |
| @@ -683,6 +684,15 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
| 683 | struct sk_buff *skb; | 684 | struct sk_buff *skb; |
| 684 | int lro = efx->net_dev->features & NETIF_F_LRO; | 685 | int lro = efx->net_dev->features & NETIF_F_LRO; |
| 685 | 686 | ||
| 687 | /* If we're in a loopback test, then pass the packet directly to the | ||
| 688 | * loopback layer, and free the rx_buf here | ||
| 689 | */ | ||
| 690 | if (unlikely(efx->loopback_selftest)) { | ||
| 691 | efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); | ||
| 692 | efx_free_rx_buffer(efx, rx_buf); | ||
| 693 | goto done; | ||
| 694 | } | ||
| 695 | |||
| 686 | if (rx_buf->skb) { | 696 | if (rx_buf->skb) { |
| 687 | prefetch(skb_shinfo(rx_buf->skb)); | 697 | prefetch(skb_shinfo(rx_buf->skb)); |
| 688 | 698 | ||
| @@ -736,7 +746,6 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
| 736 | /* Update allocation strategy method */ | 746 | /* Update allocation strategy method */ |
| 737 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | 747 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; |
| 738 | 748 | ||
| 739 | /* fall-thru */ | ||
| 740 | done: | 749 | done: |
| 741 | efx->net_dev->last_rx = jiffies; | 750 | efx->net_dev->last_rx = jiffies; |
| 742 | } | 751 | } |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c new file mode 100644 index 000000000000..cbda15946e8f --- /dev/null +++ b/drivers/net/sfc/selftest.c | |||
| @@ -0,0 +1,717 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/netdevice.h> | ||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/delay.h> | ||
| 14 | #include <linux/kernel_stat.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/ethtool.h> | ||
| 17 | #include <linux/ip.h> | ||
| 18 | #include <linux/in.h> | ||
| 19 | #include <linux/udp.h> | ||
| 20 | #include <linux/rtnetlink.h> | ||
| 21 | #include <asm/io.h> | ||
| 22 | #include "net_driver.h" | ||
| 23 | #include "ethtool.h" | ||
| 24 | #include "efx.h" | ||
| 25 | #include "falcon.h" | ||
| 26 | #include "selftest.h" | ||
| 27 | #include "boards.h" | ||
| 28 | #include "workarounds.h" | ||
| 29 | #include "mac.h" | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Loopback test packet structure | ||
| 33 | * | ||
| 34 | * The self-test should stress every RSS vector, and unfortunately | ||
| 35 | * Falcon only performs RSS on TCP/UDP packets. | ||
| 36 | */ | ||
| 37 | struct efx_loopback_payload { | ||
| 38 | struct ethhdr header; | ||
| 39 | struct iphdr ip; | ||
| 40 | struct udphdr udp; | ||
| 41 | __be16 iteration; | ||
| 42 | const char msg[64]; | ||
| 43 | } __attribute__ ((packed)); | ||
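The packed attribute is load-bearing: the receive path compares the incoming frame against this struct byte-for-byte, so its layout has to match the wire exactly. A userspace check of the arithmetic, assuming the glibc versions of the header structs are available:

```c
#include <stdio.h>
#include <stdint.h>
#include <net/ethernet.h>   /* struct ether_header: 14 bytes */
#include <netinet/ip.h>     /* struct iphdr: 20 bytes */
#include <netinet/udp.h>    /* struct udphdr: 8 bytes */

struct payload {
        struct ether_header eth;
        struct iphdr ip;
        struct udphdr udp;
        uint16_t iteration;
        char msg[64];
} __attribute__ ((packed));

int main(void)
{
        /* 14 + 20 + 8 + 2 + 64 = 108 bytes, matching the wire format.
         * Without "packed", 2 padding bytes would appear after the
         * 14-byte Ethernet header to 4-align the IP header, and the
         * byte-for-byte comparison in efx_loopback_rx_packet() would
         * never match. */
        printf("sizeof(struct payload) = %zu\n", sizeof(struct payload));
        return 0;
}
```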
| 44 | |||
| 45 | /* Loopback test source MAC address */ | ||
| 46 | static const unsigned char payload_source[ETH_ALEN] = { | ||
| 47 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, | ||
| 48 | }; | ||
| 49 | |||
| 50 | static const char *payload_msg = | ||
| 51 | "Hello world! This is an Efx loopback test in progress!"; | ||
| 52 | |||
| 53 | /** | ||
| 54 | * efx_selftest_state - persistent state during a selftest | ||
| 55 | * @flush: Drop all packets in efx_loopback_rx_packet | ||
| 56 | * @packet_count: Number of packets being used in this test | ||
| 57 | * @skbs: An array of skbs transmitted | ||
| 58 | * @rx_good: RX good packet count | ||
| 59 | * @rx_bad: RX bad packet count | ||
| 60 | * @payload: Payload used in tests | ||
| 61 | */ | ||
| 62 | struct efx_selftest_state { | ||
| 63 | int flush; | ||
| 64 | int packet_count; | ||
| 65 | struct sk_buff **skbs; | ||
| 66 | atomic_t rx_good; | ||
| 67 | atomic_t rx_bad; | ||
| 68 | struct efx_loopback_payload payload; | ||
| 69 | }; | ||
| 70 | |||
| 71 | /************************************************************************** | ||
| 72 | * | ||
| 73 | * Configurable values | ||
| 74 | * | ||
| 75 | **************************************************************************/ | ||
| 76 | |||
| 77 | /* Level of loopback testing | ||
| 78 | * | ||
| 79 | * The maximum packet burst length is 16**(n-1), i.e. | ||
| 80 | * | ||
| 81 | * - Level 0 : no packets | ||
| 82 | * - Level 1 : 1 packet | ||
| 83 | * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) | ||
| 84 | * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets) | ||
| 85 | * | ||
| 86 | */ | ||
| 87 | static unsigned int loopback_test_level = 3; | ||
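The totals in the comment are geometric sums: level n sends one burst of 16^k packets for each k below n, so total(n) = (16^n - 1)/15, giving 1, 17 and 273 for levels 1-3, with 16^(n-1) as the largest single burst. A quick check:

```c
#include <stdio.h>

int main(void)
{
        /* total(n) = sum_{k=0}^{n-1} 16^k = (16^n - 1) / 15 */
        for (int level = 0; level <= 3; level++) {
                unsigned total = 0, burst = 1;

                for (int k = 0; k < level; k++, burst *= 16)
                        total += burst;
                printf("level %d: %u packets\n", level, total);
        }
        return 0;
}
```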
| 88 | |||
| 89 | /************************************************************************** | ||
| 90 | * | ||
| 91 | * Interrupt and event queue testing | ||
| 92 | * | ||
| 93 | **************************************************************************/ | ||
| 94 | |||
| 95 | /* Test generation and receipt of interrupts */ | ||
| 96 | static int efx_test_interrupts(struct efx_nic *efx, | ||
| 97 | struct efx_self_tests *tests) | ||
| 98 | { | ||
| 99 | struct efx_channel *channel; | ||
| 100 | |||
| 101 | EFX_LOG(efx, "testing interrupts\n"); | ||
| 102 | tests->interrupt = -1; | ||
| 103 | |||
| 104 | /* Reset interrupt flag */ | ||
| 105 | efx->last_irq_cpu = -1; | ||
| 106 | smp_wmb(); | ||
| 107 | |||
| 108 | /* ACK each interrupting event queue. Receiving an interrupt due to | ||
| 109 | * traffic before a test event is raised is considered a pass */ | ||
| 110 | efx_for_each_channel_with_interrupt(channel, efx) { | ||
| 111 | if (channel->work_pending) | ||
| 112 | efx_process_channel_now(channel); | ||
| 113 | if (efx->last_irq_cpu >= 0) | ||
| 114 | goto success; | ||
| 115 | } | ||
| 116 | |||
| 117 | falcon_generate_interrupt(efx); | ||
| 118 | |||
| 119 | /* Wait for arrival of test interrupt. */ | ||
| 120 | EFX_LOG(efx, "waiting for test interrupt\n"); | ||
| 121 | schedule_timeout_uninterruptible(HZ / 10); | ||
| 122 | if (efx->last_irq_cpu >= 0) | ||
| 123 | goto success; | ||
| 124 | |||
| 125 | EFX_ERR(efx, "timed out waiting for interrupt\n"); | ||
| 126 | return -ETIMEDOUT; | ||
| 127 | |||
| 128 | success: | ||
| 129 | EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", | ||
| 130 | efx->interrupt_mode, efx->last_irq_cpu); | ||
| 131 | tests->interrupt = 1; | ||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* Test generation and receipt of non-interrupting events */ | ||
| 136 | static int efx_test_eventq(struct efx_channel *channel, | ||
| 137 | struct efx_self_tests *tests) | ||
| 138 | { | ||
| 139 | unsigned int magic; | ||
| 140 | |||
| 141 | /* Channel specific code, limited to 20 bits */ | ||
| 142 | magic = (0x00010150 + channel->channel); | ||
| 143 | EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", | ||
| 144 | channel->channel, magic); | ||
| 145 | |||
| 146 | tests->eventq_dma[channel->channel] = -1; | ||
| 147 | tests->eventq_int[channel->channel] = 1; /* fake pass */ | ||
| 148 | tests->eventq_poll[channel->channel] = 1; /* fake pass */ | ||
| 149 | |||
| 150 | /* Reset flag and zero magic word */ | ||
| 151 | channel->efx->last_irq_cpu = -1; | ||
| 152 | channel->eventq_magic = 0; | ||
| 153 | smp_wmb(); | ||
| 154 | |||
| 155 | falcon_generate_test_event(channel, magic); | ||
| 156 | udelay(1); | ||
| 157 | |||
| 158 | efx_process_channel_now(channel); | ||
| 159 | if (channel->eventq_magic != magic) { | ||
| 160 | EFX_ERR(channel->efx, "channel %d failed to see test event\n", | ||
| 161 | channel->channel); | ||
| 162 | return -ETIMEDOUT; | ||
| 163 | } else { | ||
| 164 | tests->eventq_dma[channel->channel] = 1; | ||
| 165 | } | ||
| 166 | |||
| 167 | return 0; | ||
| 168 | } | ||
| 169 | |||
| 170 | /* Test generation and receipt of interrupting events */ | ||
| 171 | static int efx_test_eventq_irq(struct efx_channel *channel, | ||
| 172 | struct efx_self_tests *tests) | ||
| 173 | { | ||
| 174 | unsigned int magic, count; | ||
| 175 | |||
| 176 | /* Channel specific code, limited to 20 bits */ | ||
| 177 | magic = (0x00010150 + channel->channel); | ||
| 178 | EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", | ||
| 179 | channel->channel, magic); | ||
| 180 | |||
| 181 | tests->eventq_dma[channel->channel] = -1; | ||
| 182 | tests->eventq_int[channel->channel] = -1; | ||
| 183 | tests->eventq_poll[channel->channel] = -1; | ||
| 184 | |||
| 185 | /* Reset flag and zero magic word */ | ||
| 186 | channel->efx->last_irq_cpu = -1; | ||
| 187 | channel->eventq_magic = 0; | ||
| 188 | smp_wmb(); | ||
| 189 | |||
| 190 | falcon_generate_test_event(channel, magic); | ||
| 191 | |||
| 192 | /* Wait for arrival of interrupt */ | ||
| 193 | count = 0; | ||
| 194 | do { | ||
| 195 | schedule_timeout_uninterruptible(HZ / 100); | ||
| 196 | |||
| 197 | if (channel->work_pending) | ||
| 198 | efx_process_channel_now(channel); | ||
| 199 | |||
| 200 | if (channel->eventq_magic == magic) | ||
| 201 | goto eventq_ok; | ||
| 202 | } while (++count < 2); | ||
| 203 | |||
| 204 | EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", | ||
| 205 | channel->channel); | ||
| 206 | |||
| 207 | /* See if interrupt arrived */ | ||
| 208 | if (channel->efx->last_irq_cpu >= 0) { | ||
| 209 | EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " | ||
| 210 | "during event queue test\n", channel->channel, | ||
| 211 | raw_smp_processor_id()); | ||
| 212 | tests->eventq_int[channel->channel] = 1; | ||
| 213 | } | ||
| 214 | |||
| 215 | /* Check to see if event was received even if interrupt wasn't */ | ||
| 216 | efx_process_channel_now(channel); | ||
| 217 | if (channel->eventq_magic == magic) { | ||
| 218 | EFX_ERR(channel->efx, "channel %d event was generated, but " | ||
| 219 | "failed to trigger an interrupt\n", channel->channel); | ||
| 220 | tests->eventq_dma[channel->channel] = 1; | ||
| 221 | } | ||
| 222 | |||
| 223 | return -ETIMEDOUT; | ||
| 224 | eventq_ok: | ||
| 225 | EFX_LOG(channel->efx, "channel %d event queue passed\n", | ||
| 226 | channel->channel); | ||
| 227 | tests->eventq_dma[channel->channel] = 1; | ||
| 228 | tests->eventq_int[channel->channel] = 1; | ||
| 229 | tests->eventq_poll[channel->channel] = 1; | ||
| 230 | return 0; | ||
| 231 | } | ||
| 232 | |||
| 233 | /************************************************************************** | ||
| 234 | * | ||
| 235 | * PHY testing | ||
| 236 | * | ||
| 237 | **************************************************************************/ | ||
| 238 | |||
| 239 | /* Check PHY presence by reading the PHY ID registers */ | ||
| 240 | static int efx_test_phy(struct efx_nic *efx, | ||
| 241 | struct efx_self_tests *tests) | ||
| 242 | { | ||
| 243 | u16 physid1, physid2; | ||
| 244 | struct mii_if_info *mii = &efx->mii; | ||
| 245 | struct net_device *net_dev = efx->net_dev; | ||
| 246 | |||
| 247 | if (efx->phy_type == PHY_TYPE_NONE) | ||
| 248 | return 0; | ||
| 249 | |||
| 250 | EFX_LOG(efx, "testing PHY presence\n"); | ||
| 251 | tests->phy_ok = -1; | ||
| 252 | |||
| 253 | physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); | ||
| 254 | physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); | ||
| 255 | |||
| 256 | if ((physid1 != 0x0000) && (physid1 != 0xffff) && | ||
| 257 | (physid2 != 0x0000) && (physid2 != 0xffff)) { | ||
| 258 | EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", | ||
| 259 | mii->phy_id, physid1, physid2); | ||
| 260 | tests->phy_ok = 1; | ||
| 261 | return 0; | ||
| 262 | } | ||
| 263 | |||
| 264 | EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); | ||
| 265 | return -ENODEV; | ||
| 266 | } | ||
| 267 | |||
| 268 | /************************************************************************** | ||
| 269 | * | ||
| 270 | * Loopback testing | ||
| 271 | * NB Only one loopback test can be executing concurrently. | ||
| 272 | * | ||
| 273 | **************************************************************************/ | ||
| 274 | |||
| 275 | /* Loopback test RX callback | ||
| 276 | * This is called for each received packet during loopback testing. | ||
| 277 | */ | ||
| 278 | void efx_loopback_rx_packet(struct efx_nic *efx, | ||
| 279 | const char *buf_ptr, int pkt_len) | ||
| 280 | { | ||
| 281 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 282 | struct efx_loopback_payload *received; | ||
| 283 | struct efx_loopback_payload *payload; | ||
| 284 | |||
| 285 | BUG_ON(!buf_ptr); | ||
| 286 | |||
| 287 | /* If we are just flushing, then drop the packet */ | ||
| 288 | if ((state == NULL) || state->flush) | ||
| 289 | return; | ||
| 290 | |||
| 291 | payload = &state->payload; | ||
| 292 | |||
| 293 | received = (struct efx_loopback_payload *)(char *) buf_ptr; | ||
| 294 | received->ip.saddr = payload->ip.saddr; | ||
| 295 | received->ip.check = payload->ip.check; | ||
| 296 | |||
| 297 | /* Check that header exists */ | ||
| 298 | if (pkt_len < sizeof(received->header)) { | ||
| 299 | EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " | ||
| 300 | "test\n", pkt_len, LOOPBACK_MODE(efx)); | ||
| 301 | goto err; | ||
| 302 | } | ||
| 303 | |||
| 304 | /* Check that the ethernet header exists */ | ||
| 305 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { | ||
| 306 | EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", | ||
| 307 | LOOPBACK_MODE(efx)); | ||
| 308 | goto err; | ||
| 309 | } | ||
| 310 | |||
| 311 | /* Check packet length */ | ||
| 312 | if (pkt_len != sizeof(*payload)) { | ||
| 313 | EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " | ||
| 314 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), | ||
| 315 | LOOPBACK_MODE(efx)); | ||
| 316 | goto err; | ||
| 317 | } | ||
| 318 | |||
| 319 | /* Check that IP header matches */ | ||
| 320 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { | ||
| 321 | EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", | ||
| 322 | LOOPBACK_MODE(efx)); | ||
| 323 | goto err; | ||
| 324 | } | ||
| 325 | |||
| 326 | /* Check that msg and padding matches */ | ||
| 327 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { | ||
| 328 | EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", | ||
| 329 | LOOPBACK_MODE(efx)); | ||
| 330 | goto err; | ||
| 331 | } | ||
| 332 | |||
| 333 | /* Check that iteration matches */ | ||
| 334 | if (received->iteration != payload->iteration) { | ||
| 335 | EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " | ||
| 336 | "%s loopback test\n", ntohs(received->iteration), | ||
| 337 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); | ||
| 338 | goto err; | ||
| 339 | } | ||
| 340 | |||
| 341 | /* Increase correct RX count */ | ||
| 342 | EFX_TRACE(efx, "got loopback RX in %s loopback test\n", | ||
| 343 | LOOPBACK_MODE(efx)); | ||
| 344 | |||
| 345 | atomic_inc(&state->rx_good); | ||
| 346 | return; | ||
| 347 | |||
| 348 | err: | ||
| 349 | #ifdef EFX_ENABLE_DEBUG | ||
| 350 | if (atomic_read(&state->rx_bad) == 0) { | ||
| 351 | EFX_ERR(efx, "received packet:\n"); | ||
| 352 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | ||
| 353 | buf_ptr, pkt_len, 0); | ||
| 354 | EFX_ERR(efx, "expected packet:\n"); | ||
| 355 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | ||
| 356 | &state->payload, sizeof(state->payload), 0); | ||
| 357 | } | ||
| 358 | #endif | ||
| 359 | atomic_inc(&state->rx_bad); | ||
| 360 | } | ||
| 361 | |||
| 362 | /* Initialise an efx_selftest_state for a new iteration */ | ||
| 363 | static void efx_iterate_state(struct efx_nic *efx) | ||
| 364 | { | ||
| 365 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 366 | struct net_device *net_dev = efx->net_dev; | ||
| 367 | struct efx_loopback_payload *payload = &state->payload; | ||
| 368 | |||
| 369 | /* Initialise the layer II (Ethernet) header */ | ||
| 370 | memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); | ||
| 371 | memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); | ||
| 372 | payload->header.h_proto = htons(ETH_P_IP); | ||
| 373 | |||
| 374 | /* saddr set later and used as incrementing count */ | ||
| 375 | payload->ip.daddr = htonl(INADDR_LOOPBACK); | ||
| 376 | payload->ip.ihl = 5; | ||
| 377 | payload->ip.check = htons(0xdead); | ||
| 378 | payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); | ||
| 379 | payload->ip.version = IPVERSION; | ||
| 380 | payload->ip.protocol = IPPROTO_UDP; | ||
| 381 | |||
| 382 | /* Initialise udp header */ | ||
| 383 | payload->udp.source = 0; | ||
| 384 | payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - | ||
| 385 | sizeof(struct iphdr)); | ||
| 386 | payload->udp.check = 0; /* checksum ignored */ | ||
| 387 | |||
| 388 | /* Fill out payload */ | ||
| 389 | payload->iteration = htons(ntohs(payload->iteration) + 1); | ||
| 390 | memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); | ||
| 391 | |||
| 392 | /* Fill out remaining state members */ | ||
| 393 | atomic_set(&state->rx_good, 0); | ||
| 394 | atomic_set(&state->rx_bad, 0); | ||
| 395 | smp_wmb(); | ||
| 396 | } | ||
| 397 | |||
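The initialisation above implies a specific wire layout for `struct efx_loopback_payload`, whose definition is not shown in this hunk. A hypothetical reconstruction, following the field order the code fills in (the authoritative definition lives earlier in selftest.c):

```c
/* Hypothetical reconstruction -- the real definition is elsewhere in
 * selftest.c; field order follows the initialisation code above. */
struct efx_loopback_payload {
	struct ethhdr header;	/* dest = port MAC, source = payload_source */
	struct iphdr ip;	/* tot_len = sizeof(*payload) - sizeof(ethhdr) */
	struct udphdr udp;	/* len = ip.tot_len - sizeof(iphdr) */
	__be16 iteration;	/* bumped once per efx_iterate_state() call */
	const char msg[64];	/* compared byte-for-byte on receive */
} __attribute__ ((packed));
```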
| 398 | static int efx_tx_loopback(struct efx_tx_queue *tx_queue) | ||
| 399 | { | ||
| 400 | struct efx_nic *efx = tx_queue->efx; | ||
| 401 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 402 | struct efx_loopback_payload *payload; | ||
| 403 | struct sk_buff *skb; | ||
| 404 | int i, rc; | ||
| 405 | |||
| 406 | /* Transmit N copies of buffer */ | ||
| 407 | for (i = 0; i < state->packet_count; i++) { | ||
| 408 | /* Allocate an skb, holding an extra reference for | ||
| 409 | * transmit completion counting */ | ||
| 410 | skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); | ||
| 411 | if (!skb) | ||
| 412 | return -ENOMEM; | ||
| 413 | state->skbs[i] = skb; | ||
| 414 | skb_get(skb); | ||
| 415 | |||
| 416 | /* Copy the payload in, incrementing the source address to | ||
| 417 | * exercise the RSS vectors */ | ||
| 418 | payload = ((struct efx_loopback_payload *) | ||
| 419 | skb_put(skb, sizeof(state->payload))); | ||
| 420 | memcpy(payload, &state->payload, sizeof(state->payload)); | ||
| 421 | payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); | ||
| 422 | |||
| 423 | /* Ensure everything we've written is visible to the | ||
| 424 | * interrupt handler. */ | ||
| 425 | smp_wmb(); | ||
| 426 | |||
| 427 | if (NET_DEV_REGISTERED(efx)) | ||
| 428 | netif_tx_lock_bh(efx->net_dev); | ||
| 429 | rc = efx_xmit(efx, tx_queue, skb); | ||
| 430 | if (NET_DEV_REGISTERED(efx)) | ||
| 431 | netif_tx_unlock_bh(efx->net_dev); | ||
| 432 | |||
| 433 | if (rc != NETDEV_TX_OK) { | ||
| 434 | EFX_ERR(efx, "TX queue %d could not transmit packet %d " | ||
| 435 | "of %d in %s loopback test\n", tx_queue->queue, | ||
| 436 | i + 1, state->packet_count, LOOPBACK_MODE(efx)); | ||
| 437 | |||
| 438 | /* Defer cleaning up the other skbs for the caller */ | ||
| 439 | kfree_skb(skb); | ||
| 440 | return -EPIPE; | ||
| 441 | } | ||
| 442 | } | ||
| 443 | |||
| 444 | return 0; | ||
| 445 | } | ||
| 446 | |||
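The `skb_get()` in `efx_tx_loopback()` pairs with the `skb_shared()` test in `efx_rx_loopback()` below. A minimal standalone sketch of the idiom, with assumed names:

```c
#include <linux/skbuff.h>

/* Minimal sketch (assumed names): the test holds a second reference on
 * each skb, so once the TX completion path has released its own
 * reference, skb_shared() returns false and the skb counts as done. */
static int count_tx_completions(struct sk_buff **skbs, int count)
{
	int done = 0, i;

	for (i = 0; i < count; i++) {
		struct sk_buff *skb = skbs[i];

		if (skb && !skb_shared(skb))	/* TX path dropped its ref */
			++done;
		dev_kfree_skb_any(skb);		/* drop the test's ref */
	}
	return done;
}
```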
| 447 | static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | ||
| 448 | struct efx_loopback_self_tests *lb_tests) | ||
| 449 | { | ||
| 450 | struct efx_nic *efx = tx_queue->efx; | ||
| 451 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 452 | struct sk_buff *skb; | ||
| 453 | int tx_done = 0, rx_good, rx_bad; | ||
| 454 | int i, rc = 0; | ||
| 455 | |||
| 456 | if (NET_DEV_REGISTERED(efx)) | ||
| 457 | netif_tx_lock_bh(efx->net_dev); | ||
| 458 | |||
| 459 | /* Count the number of TX completions and decrement the refcnt. Any | ||
| 460 | * skbs not already completed will be freed when the queue is flushed */ | ||
| 461 | for (i = 0; i < state->packet_count; i++) { | ||
| 462 | skb = state->skbs[i]; | ||
| 463 | if (skb && !skb_shared(skb)) | ||
| 464 | ++tx_done; | ||
| 465 | dev_kfree_skb_any(skb); | ||
| 466 | } | ||
| 467 | |||
| 468 | if (NET_DEV_REGISTERED(efx)) | ||
| 469 | netif_tx_unlock_bh(efx->net_dev); | ||
| 470 | |||
| 471 | /* Check TX completion and received packet counts */ | ||
| 472 | rx_good = atomic_read(&state->rx_good); | ||
| 473 | rx_bad = atomic_read(&state->rx_bad); | ||
| 474 | if (tx_done != state->packet_count) { | ||
| 475 | /* Don't free the skbs; they will be picked up on TX | ||
| 476 | * overflow or channel teardown. | ||
| 477 | */ | ||
| 478 | EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " | ||
| 479 | "TX completion events in %s loopback test\n", | ||
| 480 | tx_queue->queue, tx_done, state->packet_count, | ||
| 481 | LOOPBACK_MODE(efx)); | ||
| 482 | rc = -ETIMEDOUT; | ||
| 483 | /* Allow to fall through so we see the RX errors as well */ | ||
| 484 | } | ||
| 485 | |||
| 486 | /* The received count may always be up to one flush short of the desired total */ | ||
| 487 | if (rx_good != state->packet_count) { | ||
| 488 | EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " | ||
| 489 | "received packets in %s loopback test\n", | ||
| 490 | tx_queue->queue, rx_good, state->packet_count, | ||
| 491 | LOOPBACK_MODE(efx)); | ||
| 492 | rc = -ETIMEDOUT; | ||
| 493 | /* Fall through */ | ||
| 494 | } | ||
| 495 | |||
| 496 | /* Update loopback test structure */ | ||
| 497 | lb_tests->tx_sent[tx_queue->queue] += state->packet_count; | ||
| 498 | lb_tests->tx_done[tx_queue->queue] += tx_done; | ||
| 499 | lb_tests->rx_good += rx_good; | ||
| 500 | lb_tests->rx_bad += rx_bad; | ||
| 501 | |||
| 502 | return rc; | ||
| 503 | } | ||
| 504 | |||
| 505 | static int | ||
| 506 | efx_test_loopback(struct efx_tx_queue *tx_queue, | ||
| 507 | struct efx_loopback_self_tests *lb_tests) | ||
| 508 | { | ||
| 509 | struct efx_nic *efx = tx_queue->efx; | ||
| 510 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 511 | struct efx_channel *channel; | ||
| 512 | int i, rc = 0; | ||
| 513 | |||
| 514 | for (i = 0; i < loopback_test_level; i++) { | ||
| 515 | /* Determine how many packets to send */ | ||
| 516 | state->packet_count = (efx->type->txd_ring_mask + 1) / 3; | ||
| 517 | state->packet_count = min(1 << (i << 2), state->packet_count); | ||
| 518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | ||
| 519 | state->packet_count, GFP_KERNEL); | ||
| 520 | state->flush = 0; | ||
| 521 | |||
| 522 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | ||
| 523 | "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | ||
| 524 | state->packet_count); | ||
| 525 | |||
| 526 | efx_iterate_state(efx); | ||
| 527 | rc = efx_tx_loopback(tx_queue); | ||
| 528 | |||
| 529 | /* NAPI polling is not enabled, so process channels synchronously */ | ||
| 530 | schedule_timeout_uninterruptible(HZ / 50); | ||
| 531 | efx_for_each_channel_with_interrupt(channel, efx) { | ||
| 532 | if (channel->work_pending) | ||
| 533 | efx_process_channel_now(channel); | ||
| 534 | } | ||
| 535 | |||
| 536 | rc |= efx_rx_loopback(tx_queue, lb_tests); | ||
| 537 | kfree(state->skbs); | ||
| 538 | |||
| 539 | if (rc) { | ||
| 540 | /* Wait a while to ensure there are no packets | ||
| 541 | * floating around after a failure. */ | ||
| 542 | schedule_timeout_uninterruptible(HZ / 10); | ||
| 543 | return rc; | ||
| 544 | } | ||
| 545 | } | ||
| 546 | |||
| 547 | EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " | ||
| 548 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | ||
| 549 | state->packet_count); | ||
| 550 | |||
| 551 | return rc; | ||
| 552 | } | ||
| 553 | |||
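The burst sizing in `efx_test_loopback()` grows geometrically with the test level and is capped at a third of the TX ring. A standalone illustration, assuming a hypothetical 4096-entry ring (`txd_ring_mask == 4095`):

```c
#include <stdio.h>

int main(void)
{
	const int ring_entries = 4096;		/* assumed ring size */
	const int cap = ring_entries / 3;	/* 1365: leaves room to queue
						 * a whole burst at once */
	int i;

	for (i = 0; i < 4; i++) {
		int count = 1 << (i << 2);	/* 1, 16, 256, 4096 */
		if (count > cap)
			count = cap;
		printf("level %d: %d packets\n", i, count);
	}
	return 0;	/* prints 1, 16, 256, 1365 */
}
```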
| 554 | static int efx_test_loopbacks(struct efx_nic *efx, | ||
| 555 | struct efx_self_tests *tests, | ||
| 556 | unsigned int loopback_modes) | ||
| 557 | { | ||
| 558 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
| 559 | struct ethtool_cmd ecmd, ecmd_loopback; | ||
| 560 | struct efx_tx_queue *tx_queue; | ||
| 561 | enum efx_loopback_mode old_mode, mode; | ||
| 562 | int count, rc = 0, link_up; | ||
| 563 | |||
| 564 | rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); | ||
| 565 | if (rc) { | ||
| 566 | EFX_ERR(efx, "could not get GMII settings\n"); | ||
| 567 | return rc; | ||
| 568 | } | ||
| 569 | old_mode = efx->loopback_mode; | ||
| 570 | |||
| 571 | /* Disable autonegotiation for the purposes of loopback */ | ||
| 572 | memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); | ||
| 573 | if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { | ||
| 574 | ecmd_loopback.autoneg = AUTONEG_DISABLE; | ||
| 575 | ecmd_loopback.duplex = DUPLEX_FULL; | ||
| 576 | ecmd_loopback.speed = SPEED_10000; | ||
| 577 | } | ||
| 578 | |||
| 579 | rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); | ||
| 580 | if (rc) { | ||
| 581 | EFX_ERR(efx, "could not disable autonegotiation\n"); | ||
| 582 | goto out; | ||
| 583 | } | ||
| 584 | tests->loopback_speed = ecmd_loopback.speed; | ||
| 585 | tests->loopback_full_duplex = ecmd_loopback.duplex; | ||
| 586 | |||
| 587 | /* Test all supported loopback modes */ | ||
| 588 | for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { | ||
| 589 | if (!(loopback_modes & (1 << mode))) | ||
| 590 | continue; | ||
| 591 | |||
| 592 | /* Move the port into the specified loopback mode. */ | ||
| 593 | state->flush = 1; | ||
| 594 | efx->loopback_mode = mode; | ||
| 595 | efx_reconfigure_port(efx); | ||
| 596 | |||
| 597 | /* Wait for the PHY to signal the link is up */ | ||
| 598 | count = 0; | ||
| 599 | do { | ||
| 600 | struct efx_channel *channel = &efx->channel[0]; | ||
| 601 | |||
| 602 | falcon_check_xmac(efx); | ||
| 603 | schedule_timeout_uninterruptible(HZ / 10); | ||
| 604 | if (channel->work_pending) | ||
| 605 | efx_process_channel_now(channel); | ||
| 606 | /* Wait for PHY events to be processed */ | ||
| 607 | flush_workqueue(efx->workqueue); | ||
| 608 | rmb(); | ||
| 609 | |||
| 610 | /* efx->link_up can be 1 even if the XAUI link is down | ||
| 611 | * (bug 5762). Usually the difference is not worth bothering | ||
| 612 | * with, but for selftests we need the extra guarantee that | ||
| 613 | * the link really is up. | ||
| 614 | */ | ||
| 615 | link_up = efx->link_up; | ||
| 616 | if (!falcon_xaui_link_ok(efx)) | ||
| 617 | link_up = 0; | ||
| 618 | |||
| 619 | } while ((++count < 20) && !link_up); | ||
| 620 | |||
| 621 | /* The link should now be up. If it isn't, there is no point | ||
| 622 | * in attempting a loopback test */ | ||
| 623 | if (!link_up) { | ||
| 624 | EFX_ERR(efx, "loopback %s never came up\n", | ||
| 625 | LOOPBACK_MODE(efx)); | ||
| 626 | rc = -EIO; | ||
| 627 | goto out; | ||
| 628 | } | ||
| 629 | |||
| 630 | EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", | ||
| 631 | LOOPBACK_MODE(efx), count); | ||
| 632 | |||
| 633 | /* Test every TX queue */ | ||
| 634 | efx_for_each_tx_queue(tx_queue, efx) { | ||
| 635 | rc |= efx_test_loopback(tx_queue, | ||
| 636 | &tests->loopback[mode]); | ||
| 637 | if (rc) | ||
| 638 | goto out; | ||
| 639 | } | ||
| 640 | } | ||
| 641 | |||
| 642 | out: | ||
| 643 | /* Take out of loopback and restore PHY settings */ | ||
| 644 | state->flush = 1; | ||
| 645 | efx->loopback_mode = old_mode; | ||
| 646 | efx_ethtool_set_settings(efx->net_dev, &ecmd); | ||
| 647 | |||
| 648 | return rc; | ||
| 649 | } | ||
| 650 | |||
| 651 | /************************************************************************** | ||
| 652 | * | ||
| 653 | * Entry points | ||
| 654 | * | ||
| 655 | *************************************************************************/ | ||
| 656 | |||
| 657 | /* Online (i.e. non-disruptive) testing | ||
| 658 | * This checks interrupt generation, event delivery and PHY presence. */ | ||
| 659 | int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) | ||
| 660 | { | ||
| 661 | struct efx_channel *channel; | ||
| 662 | int rc = 0; | ||
| 663 | |||
| 664 | EFX_LOG(efx, "performing online self-tests\n"); | ||
| 665 | |||
| 666 | rc |= efx_test_interrupts(efx, tests); | ||
| 667 | efx_for_each_channel(channel, efx) { | ||
| 668 | if (channel->has_interrupt) | ||
| 669 | rc |= efx_test_eventq_irq(channel, tests); | ||
| 670 | else | ||
| 671 | rc |= efx_test_eventq(channel, tests); | ||
| 672 | } | ||
| 673 | rc |= efx_test_phy(efx, tests); | ||
| 674 | |||
| 675 | if (rc) | ||
| 676 | EFX_ERR(efx, "failed online self-tests\n"); | ||
| 677 | |||
| 678 | return rc; | ||
| 679 | } | ||
| 680 | |||
| 681 | /* Offline (i.e. disruptive) testing | ||
| 682 | * This checks MAC and PHY loopback on the specified port. */ | ||
| 683 | int efx_offline_test(struct efx_nic *efx, | ||
| 684 | struct efx_self_tests *tests, unsigned int loopback_modes) | ||
| 685 | { | ||
| 686 | struct efx_selftest_state *state; | ||
| 687 | int rc = 0; | ||
| 688 | |||
| 689 | EFX_LOG(efx, "performing offline self-tests\n"); | ||
| 690 | |||
| 691 | /* Create a selftest_state structure to hold state for the test */ | ||
| 692 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
| 693 | if (state == NULL) { | ||
| 694 | rc = -ENOMEM; | ||
| 695 | goto out; | ||
| 696 | } | ||
| 697 | |||
| 698 | /* Set the port loopback_selftest member. From this point on | ||
| 699 | * all received packets will be dropped. Mark the state as | ||
| 700 | * "flushing" so all inflight packets are dropped */ | ||
| 701 | BUG_ON(efx->loopback_selftest); | ||
| 702 | state->flush = 1; | ||
| 703 | efx->loopback_selftest = (void *)state; | ||
| 704 | |||
| 705 | rc = efx_test_loopbacks(efx, tests, loopback_modes); | ||
| 706 | |||
| 707 | efx->loopback_selftest = NULL; | ||
| 708 | wmb(); | ||
| 709 | kfree(state); | ||
| 710 | |||
| 711 | out: | ||
| 712 | if (rc) | ||
| 713 | EFX_ERR(efx, "failed offline self-tests\n"); | ||
| 714 | |||
| 715 | return rc; | ||
| 716 | } | ||
| 717 | |||
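A hedged sketch of how a caller, such as the driver's ethtool self-test hook, might drive these two entry points. The `efx->phy_op->loopbacks` mask and the `ETH_TEST_FL_*` plumbing are assumptions for illustration, not the driver's actual glue code:

```c
/* Sketch only: this patch adds a .loopbacks mask to efx_phy_operations,
 * which is the natural mode set to hand to efx_offline_test(). */
static void example_ethtool_self_test(struct efx_nic *efx,
				      struct ethtool_test *test)
{
	struct efx_self_tests tests;
	int rc;

	memset(&tests, 0, sizeof(tests));

	rc = efx_online_test(efx, &tests);
	if (!rc && (test->flags & ETH_TEST_FL_OFFLINE))
		rc = efx_offline_test(efx, &tests,
				      efx->phy_op->loopbacks);
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}
```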
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h new file mode 100644 index 000000000000..f6999c2b622d --- /dev/null +++ b/drivers/net/sfc/selftest.h | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_SELFTEST_H | ||
| 12 | #define EFX_SELFTEST_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Self tests | ||
| 18 | */ | ||
| 19 | |||
| 20 | struct efx_loopback_self_tests { | ||
| 21 | int tx_sent[EFX_MAX_TX_QUEUES]; | ||
| 22 | int tx_done[EFX_MAX_TX_QUEUES]; | ||
| 23 | int rx_good; | ||
| 24 | int rx_bad; | ||
| 25 | }; | ||
| 26 | |||
| 27 | /* Efx self test results | ||
| 28 | * For fields which are not counters, 1 indicates success and -1 | ||
| 29 | * indicates failure. | ||
| 30 | */ | ||
| 31 | struct efx_self_tests { | ||
| 32 | int interrupt; | ||
| 33 | int eventq_dma[EFX_MAX_CHANNELS]; | ||
| 34 | int eventq_int[EFX_MAX_CHANNELS]; | ||
| 35 | int eventq_poll[EFX_MAX_CHANNELS]; | ||
| 36 | int phy_ok; | ||
| 37 | int loopback_speed; | ||
| 38 | int loopback_full_duplex; | ||
| 39 | struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; | ||
| 40 | }; | ||
| 41 | |||
| 42 | extern void efx_loopback_rx_packet(struct efx_nic *efx, | ||
| 43 | const char *buf_ptr, int pkt_len); | ||
| 44 | extern int efx_online_test(struct efx_nic *efx, | ||
| 45 | struct efx_self_tests *tests); | ||
| 46 | extern int efx_offline_test(struct efx_nic *efx, | ||
| 47 | struct efx_self_tests *tests, | ||
| 48 | unsigned int loopback_modes); | ||
| 49 | |||
| 50 | #endif /* EFX_SELFTEST_H */ | ||
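Given the convention documented above (1 = pass, -1 = fail for non-counter fields), a zeroed structure reads as "not run". A small helper one might layer on top of this header:

```c
/* Assumes the 1 / -1 convention above; 0 is the kzalloc'd default. */
static const char *efx_test_result_str(int result)
{
	switch (result) {
	case 1:
		return "pass";
	case -1:
		return "FAIL";
	default:
		return "not run";
	}
}
```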
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 11fa9fb8f48b..725d1a539c49 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
| @@ -130,6 +130,15 @@ void sfe4001_poweroff(struct efx_nic *efx) | |||
| 130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | 130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected | ||
| 134 | * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- | ||
| 135 | * up to allow writing the flash (done through MDIO from userland). | ||
| 136 | */ | ||
| 137 | unsigned int sfe4001_phy_flash_cfg; | ||
| 138 | module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); | ||
| 139 | MODULE_PARM_DESC(phy_flash_cfg, | ||
| 140 | "Force PHY to enter flash configuration mode"); | ||
| 141 | |||
| 133 | /* This board uses an I2C expander to provide power to the PHY, which needs to | 142 | /* This board uses an I2C expander to provide power to the PHY, which needs to |
| 134 | * be turned on before the PHY can be used. | 143 | * be turned on before the PHY can be used. |
| 135 | * Context: Process context, rtnl lock held | 144 | * Context: Process context, rtnl lock held |
| @@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx) | |||
| 203 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | | 212 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | |
| 204 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | | 213 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | |
| 205 | (1 << P0_X_TRST_LBN)); | 214 | (1 << P0_X_TRST_LBN)); |
| 215 | if (sfe4001_phy_flash_cfg) | ||
| 216 | out |= 1 << P0_EN_3V3X_LBN; | ||
| 206 | 217 | ||
| 207 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 218 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
| 208 | if (rc) | 219 | if (rc) |
| @@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx) | |||
| 226 | if (in & (1 << P1_AFE_PWD_LBN)) | 237 | if (in & (1 << P1_AFE_PWD_LBN)) |
| 227 | goto done; | 238 | goto done; |
| 228 | 239 | ||
| 240 | /* DSP doesn't look powered in flash config mode */ | ||
| 241 | if (sfe4001_phy_flash_cfg) | ||
| 242 | goto done; | ||
| 229 | } while (++count < 20); | 243 | } while (++count < 20); |
| 230 | 244 | ||
| 231 | EFX_INFO(efx, "timed out waiting for power\n"); | 245 | EFX_INFO(efx, "timed out waiting for power\n"); |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index a2e9f79e47b1..b1cd6deec01f 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
| @@ -24,6 +24,11 @@ | |||
| 24 | MDIO_MMDREG_DEVS0_PCS | \ | 24 | MDIO_MMDREG_DEVS0_PCS | \ |
| 25 | MDIO_MMDREG_DEVS0_PHYXS) | 25 | MDIO_MMDREG_DEVS0_PHYXS) |
| 26 | 26 | ||
| 27 | #define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ | ||
| 28 | (1 << LOOPBACK_PCS) | \ | ||
| 29 | (1 << LOOPBACK_PMAPMD) | \ | ||
| 30 | (1 << LOOPBACK_NETWORK)) | ||
| 31 | |||
| 27 | /* We complain if we fail to see the link partner as 10G capable this many | 32 | /* We complain if we fail to see the link partner as 10G capable this many |
| 28 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) | 33 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) |
| 29 | */ | 34 | */ |
| @@ -72,6 +77,10 @@ | |||
| 72 | #define PMA_PMD_BIST_RXD_LBN (1) | 77 | #define PMA_PMD_BIST_RXD_LBN (1) |
| 73 | #define PMA_PMD_BIST_AFE_LBN (0) | 78 | #define PMA_PMD_BIST_AFE_LBN (0) |
| 74 | 79 | ||
| 80 | /* Special Software reset register */ | ||
| 81 | #define PMA_PMD_EXT_CTRL_REG 49152 | ||
| 82 | #define PMA_PMD_EXT_SSR_LBN 15 | ||
| 83 | |||
| 75 | #define BIST_MAX_DELAY (1000) | 84 | #define BIST_MAX_DELAY (1000) |
| 76 | #define BIST_POLL_DELAY (10) | 85 | #define BIST_POLL_DELAY (10) |
| 77 | 86 | ||
| @@ -86,6 +95,11 @@ | |||
| 86 | #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ | 95 | #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ |
| 87 | #define CLK312_EN_LBN 3 | 96 | #define CLK312_EN_LBN 3 |
| 88 | 97 | ||
| 98 | /* PHYXS registers */ | ||
| 99 | #define PHYXS_TEST1 (49162) | ||
| 100 | #define LOOPBACK_NEAR_LBN (8) | ||
| 101 | #define LOOPBACK_NEAR_WIDTH (1) | ||
| 102 | |||
| 89 | /* Boot status register */ | 103 | /* Boot status register */ |
| 90 | #define PCS_BOOT_STATUS_REG (0xd000) | 104 | #define PCS_BOOT_STATUS_REG (0xd000) |
| 91 | #define PCS_BOOT_FATAL_ERR_LBN (0) | 105 | #define PCS_BOOT_FATAL_ERR_LBN (0) |
| @@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold, | |||
| 106 | 120 | ||
| 107 | struct tenxpress_phy_data { | 121 | struct tenxpress_phy_data { |
| 108 | enum tenxpress_state state; | 122 | enum tenxpress_state state; |
| 123 | enum efx_loopback_mode loopback_mode; | ||
| 109 | atomic_t bad_crc_count; | 124 | atomic_t bad_crc_count; |
| 125 | int tx_disabled; | ||
| 110 | int bad_lp_tries; | 126 | int bad_lp_tries; |
| 111 | }; | 127 | }; |
| 112 | 128 | ||
| @@ -199,10 +215,12 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
| 199 | 215 | ||
| 200 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | 216 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); |
| 201 | 217 | ||
| 202 | rc = mdio_clause45_wait_reset_mmds(efx, | 218 | if (!sfe4001_phy_flash_cfg) { |
| 203 | TENXPRESS_REQUIRED_DEVS); | 219 | rc = mdio_clause45_wait_reset_mmds(efx, |
| 204 | if (rc < 0) | 220 | TENXPRESS_REQUIRED_DEVS); |
| 205 | goto fail; | 221 | if (rc < 0) |
| 222 | goto fail; | ||
| 223 | } | ||
| 206 | 224 | ||
| 207 | rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); | 225 | rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); |
| 208 | if (rc < 0) | 226 | if (rc < 0) |
| @@ -225,6 +243,35 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
| 225 | return rc; | 243 | return rc; |
| 226 | } | 244 | } |
| 227 | 245 | ||
| 246 | static int tenxpress_special_reset(struct efx_nic *efx) | ||
| 247 | { | ||
| 248 | int rc, reg; | ||
| 249 | |||
| 250 | EFX_TRACE(efx, "%s\n", __func__); | ||
| 251 | |||
| 252 | /* Initiate reset */ | ||
| 253 | reg = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 254 | MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); | ||
| 255 | reg |= (1 << PMA_PMD_EXT_SSR_LBN); | ||
| 256 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 257 | PMA_PMD_EXT_CTRL_REG, reg); | ||
| 258 | |||
| 259 | msleep(200); | ||
| 260 | |||
| 261 | /* Wait for the blocks to come out of reset */ | ||
| 262 | rc = mdio_clause45_wait_reset_mmds(efx, | ||
| 263 | TENXPRESS_REQUIRED_DEVS); | ||
| 264 | if (rc < 0) | ||
| 265 | return rc; | ||
| 266 | |||
| 267 | /* Try and reconfigure the device */ | ||
| 268 | rc = tenxpress_init(efx); | ||
| 269 | if (rc < 0) | ||
| 270 | return rc; | ||
| 271 | |||
| 272 | return 0; | ||
| 273 | } | ||
| 274 | |||
| 228 | static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) | 275 | static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) |
| 229 | { | 276 | { |
| 230 | struct tenxpress_phy_data *pd = efx->phy_data; | 277 | struct tenxpress_phy_data *pd = efx->phy_data; |
| @@ -299,11 +346,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) | |||
| 299 | return ok; | 346 | return ok; |
| 300 | } | 347 | } |
| 301 | 348 | ||
| 349 | static void tenxpress_phyxs_loopback(struct efx_nic *efx) | ||
| 350 | { | ||
| 351 | int phy_id = efx->mii.phy_id; | ||
| 352 | int ctrl1, ctrl2; | ||
| 353 | |||
| 354 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
| 355 | PHYXS_TEST1); | ||
| 356 | if (efx->loopback_mode == LOOPBACK_PHYXS) | ||
| 357 | ctrl2 |= (1 << LOOPBACK_NEAR_LBN); | ||
| 358 | else | ||
| 359 | ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); | ||
| 360 | if (ctrl1 != ctrl2) | ||
| 361 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, | ||
| 362 | PHYXS_TEST1, ctrl2); | ||
| 363 | } | ||
| 364 | |||
| 302 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) | 365 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) |
| 303 | { | 366 | { |
| 367 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
| 368 | int loop_change = LOOPBACK_OUT_OF(phy_data, efx, | ||
| 369 | TENXPRESS_LOOPBACKS); | ||
| 370 | |||
| 304 | if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) | 371 | if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) |
| 305 | return; | 372 | return; |
| 306 | 373 | ||
| 374 | /* When coming out of transmit disable, coming out of low power | ||
| 375 | * mode, or moving out of any PHY internal loopback mode, | ||
| 376 | * perform a special software reset */ | ||
| 377 | if ((phy_data->tx_disabled && !efx->tx_disabled) || | ||
| 378 | loop_change) { | ||
| 379 | (void) tenxpress_special_reset(efx); | ||
| 380 | falcon_reset_xaui(efx); | ||
| 381 | } | ||
| 382 | |||
| 383 | mdio_clause45_transmit_disable(efx); | ||
| 384 | mdio_clause45_phy_reconfigure(efx); | ||
| 385 | tenxpress_phyxs_loopback(efx); | ||
| 386 | |||
| 387 | phy_data->tx_disabled = efx->tx_disabled; | ||
| 388 | phy_data->loopback_mode = efx->loopback_mode; | ||
| 307 | efx->link_up = tenxpress_link_ok(efx, 0); | 389 | efx->link_up = tenxpress_link_ok(efx, 0); |
| 308 | efx->link_options = GM_LPA_10000FULL; | 390 | efx->link_options = GM_LPA_10000FULL; |
| 309 | } | 391 | } |
| @@ -431,4 +513,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = { | |||
| 431 | .clear_interrupt = tenxpress_phy_clear_interrupt, | 513 | .clear_interrupt = tenxpress_phy_clear_interrupt, |
| 432 | .reset_xaui = tenxpress_reset_xaui, | 514 | .reset_xaui = tenxpress_reset_xaui, |
| 433 | .mmds = TENXPRESS_REQUIRED_DEVS, | 515 | .mmds = TENXPRESS_REQUIRED_DEVS, |
| 516 | .loopbacks = TENXPRESS_LOOPBACKS, | ||
| 434 | }; | 517 | }; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index fbb866b2185e..9b436f5b4888 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
| @@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, | |||
| 82 | } | 82 | } |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | /** | ||
| 86 | * struct efx_tso_header - a DMA mapped buffer for packet headers | ||
| 87 | * @next: Linked list of free ones. | ||
| 88 | * The list is protected by the TX queue lock. | ||
| 89 | * @unmap_len: Length to unmap for an oversize buffer, or 0. | ||
| 90 | * @dma_addr: The DMA address of the header below. | ||
| 91 | * | ||
| 92 | * This controls the memory used for a TSO header. Use TSOH_BUFFER() | ||
| 93 | * to find the packet header data. Use TSOH_SIZE() to calculate the | ||
| 94 | * total size required for a given packet header length. TSO headers | ||
| 95 | * in the free list are exactly %TSOH_STD_SIZE bytes in size. | ||
| 96 | */ | ||
| 97 | struct efx_tso_header { | ||
| 98 | union { | ||
| 99 | struct efx_tso_header *next; | ||
| 100 | size_t unmap_len; | ||
| 101 | }; | ||
| 102 | dma_addr_t dma_addr; | ||
| 103 | }; | ||
| 104 | |||
| 105 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | ||
| 106 | const struct sk_buff *skb); | ||
| 107 | static void efx_fini_tso(struct efx_tx_queue *tx_queue); | ||
| 108 | static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, | ||
| 109 | struct efx_tso_header *tsoh); | ||
| 110 | |||
| 111 | static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, | ||
| 112 | struct efx_tx_buffer *buffer) | ||
| 113 | { | ||
| 114 | if (buffer->tsoh) { | ||
| 115 | if (likely(!buffer->tsoh->unmap_len)) { | ||
| 116 | buffer->tsoh->next = tx_queue->tso_headers_free; | ||
| 117 | tx_queue->tso_headers_free = buffer->tsoh; | ||
| 118 | } else { | ||
| 119 | efx_tsoh_heap_free(tx_queue, buffer->tsoh); | ||
| 120 | } | ||
| 121 | buffer->tsoh = NULL; | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 85 | 125 | ||
| 86 | /* | 126 | /* |
| 87 | * Add a socket buffer to a TX queue | 127 | * Add a socket buffer to a TX queue |
| @@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
| 114 | 154 | ||
| 115 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | 155 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); |
| 116 | 156 | ||
| 157 | if (skb_shinfo((struct sk_buff *)skb)->gso_size) | ||
| 158 | return efx_enqueue_skb_tso(tx_queue, skb); | ||
| 159 | |||
| 117 | /* Get size of the initial fragment */ | 160 | /* Get size of the initial fragment */ |
| 118 | len = skb_headlen(skb); | 161 | len = skb_headlen(skb); |
| 119 | 162 | ||
| @@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
| 166 | insert_ptr = (tx_queue->insert_count & | 209 | insert_ptr = (tx_queue->insert_count & |
| 167 | efx->type->txd_ring_mask); | 210 | efx->type->txd_ring_mask); |
| 168 | buffer = &tx_queue->buffer[insert_ptr]; | 211 | buffer = &tx_queue->buffer[insert_ptr]; |
| 212 | efx_tsoh_free(tx_queue, buffer); | ||
| 213 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
| 169 | EFX_BUG_ON_PARANOID(buffer->skb); | 214 | EFX_BUG_ON_PARANOID(buffer->skb); |
| 170 | EFX_BUG_ON_PARANOID(buffer->len); | 215 | EFX_BUG_ON_PARANOID(buffer->len); |
| 171 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | 216 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); |
| @@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | |||
| 432 | 477 | ||
| 433 | efx_release_tx_buffers(tx_queue); | 478 | efx_release_tx_buffers(tx_queue); |
| 434 | 479 | ||
| 480 | /* Free up TSO header cache */ | ||
| 481 | efx_fini_tso(tx_queue); | ||
| 482 | |||
| 435 | /* Release queue's stop on port, if any */ | 483 | /* Release queue's stop on port, if any */ |
| 436 | if (tx_queue->stopped) { | 484 | if (tx_queue->stopped) { |
| 437 | tx_queue->stopped = 0; | 485 | tx_queue->stopped = 0; |
| @@ -450,3 +498,619 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | |||
| 450 | } | 498 | } |
| 451 | 499 | ||
| 452 | 500 | ||
| 501 | /* Efx TCP segmentation acceleration. | ||
| 502 | * | ||
| 503 | * Why? Because by doing it here in the driver we can go significantly | ||
| 504 | * faster than GSO. | ||
| 505 | * | ||
| 506 | * Requires TX checksum offload support. | ||
| 507 | */ | ||
| 508 | |||
| 509 | /* Number of bytes inserted at the start of a TSO header buffer, | ||
| 510 | * similar to NET_IP_ALIGN. | ||
| 511 | */ | ||
| 512 | #if defined(__i386__) || defined(__x86_64__) | ||
| 513 | #define TSOH_OFFSET 0 | ||
| 514 | #else | ||
| 515 | #define TSOH_OFFSET NET_IP_ALIGN | ||
| 516 | #endif | ||
| 517 | |||
| 518 | #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) | ||
| 519 | |||
| 520 | /* Total size of struct efx_tso_header, buffer and padding */ | ||
| 521 | #define TSOH_SIZE(hdr_len) \ | ||
| 522 | (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) | ||
| 523 | |||
| 524 | /* Size of blocks on free list. Larger blocks must be allocated from | ||
| 525 | * the heap. | ||
| 526 | */ | ||
| 527 | #define TSOH_STD_SIZE 128 | ||
| 528 | |||
| 529 | #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) | ||
| 530 | #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) | ||
| 531 | #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) | ||
| 532 | #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) | ||
| 533 | |||
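For concreteness, a worked example of these sizing macros, assuming a 64-bit x86 build, so `TSOH_OFFSET` is 0 and `struct efx_tso_header` occupies 16 bytes (an 8-byte union plus an 8-byte `dma_addr_t`); the sizes are illustrative, not guaranteed:

```c
/* Assumed 64-bit x86 layout; illustrative only.
 *
 *   TSOH_SIZE(54)  = 16 + 0 + 54  = 70  -> fits in a TSOH_STD_SIZE
 *                                          (128-byte) free-list block
 *   TSOH_SIZE(120) = 16 + 0 + 120 = 136 -> exceeds 128, so the header
 *                                          is kmalloc'd and DMA-mapped
 *                                          individually instead
 */
```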
| 534 | /** | ||
| 535 | * struct tso_state - TSO state for an SKB | ||
| 536 | * @remaining_len: Bytes of data we've yet to segment | ||
| 537 | * @seqnum: Current sequence number | ||
| 538 | * @packet_space: Remaining space in current packet | ||
| 539 | * @ifc: Input fragment cursor. | ||
| 540 | * Where we are in the current fragment of the incoming SKB. These | ||
| 541 | * values get updated in place when we split a fragment over | ||
| 542 | * multiple packets. | ||
| 543 | * @p: Parameters. | ||
| 544 | * These values are set once at the start of the TSO send and do | ||
| 545 | * not get changed as the routine progresses. | ||
| 546 | * | ||
| 547 | * The state used during segmentation. It is put into this data structure | ||
| 548 | * just to make it easy to pass into inline functions. | ||
| 549 | */ | ||
| 550 | struct tso_state { | ||
| 551 | unsigned remaining_len; | ||
| 552 | unsigned seqnum; | ||
| 553 | unsigned packet_space; | ||
| 554 | |||
| 555 | struct { | ||
| 556 | /* DMA address of current position */ | ||
| 557 | dma_addr_t dma_addr; | ||
| 558 | /* Remaining length */ | ||
| 559 | unsigned int len; | ||
| 560 | /* DMA address and length of the whole fragment */ | ||
| 561 | unsigned int unmap_len; | ||
| 562 | dma_addr_t unmap_addr; | ||
| 563 | struct page *page; | ||
| 564 | unsigned page_off; | ||
| 565 | } ifc; | ||
| 566 | |||
| 567 | struct { | ||
| 568 | /* The number of bytes of header */ | ||
| 569 | unsigned int header_length; | ||
| 570 | |||
| 571 | /* The number of bytes to put in each outgoing segment. */ | ||
| 572 | int full_packet_size; | ||
| 573 | |||
| 574 | /* Current IPv4 ID, host endian. */ | ||
| 575 | unsigned ipv4_id; | ||
| 576 | } p; | ||
| 577 | }; | ||
| 578 | |||
| 579 | |||
| 580 | /* | ||
| 581 | * Verify that our various assumptions about sk_buffs and the conditions | ||
| 582 | * under which TSO will be attempted hold true. | ||
| 583 | */ | ||
| 584 | static inline void efx_tso_check_safe(const struct sk_buff *skb) | ||
| 585 | { | ||
| 586 | EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); | ||
| 587 | EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != | ||
| 588 | skb->protocol); | ||
| 589 | EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); | ||
| 590 | EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) | ||
| 591 | + (tcp_hdr(skb)->doff << 2u)) > | ||
| 592 | skb_headlen(skb)); | ||
| 593 | } | ||
| 594 | |||
| 595 | |||
| 596 | /* | ||
| 597 | * Allocate a page worth of efx_tso_header structures, and string them | ||
| 598 | * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. | ||
| 599 | */ | ||
| 600 | static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | ||
| 601 | { | ||
| 603 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | ||
| 604 | struct efx_tso_header *tsoh; | ||
| 605 | dma_addr_t dma_addr; | ||
| 606 | u8 *base_kva, *kva; | ||
| 607 | |||
| 608 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); | ||
| 609 | if (base_kva == NULL) { | ||
| 610 | EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" | ||
| 611 | " headers\n"); | ||
| 612 | return -ENOMEM; | ||
| 613 | } | ||
| 614 | |||
| 615 | /* pci_alloc_consistent() allocates pages. */ | ||
| 616 | EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); | ||
| 617 | |||
| 618 | for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { | ||
| 619 | tsoh = (struct efx_tso_header *)kva; | ||
| 620 | tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); | ||
| 621 | tsoh->next = tx_queue->tso_headers_free; | ||
| 622 | tx_queue->tso_headers_free = tsoh; | ||
| 623 | } | ||
| 624 | |||
| 625 | return 0; | ||
| 626 | } | ||
| 627 | |||
| 628 | |||
| 629 | /* Free up a TSO header, and all others in the same page. */ | ||
| 630 | static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | ||
| 631 | struct efx_tso_header *tsoh, | ||
| 632 | struct pci_dev *pci_dev) | ||
| 633 | { | ||
| 634 | struct efx_tso_header **p; | ||
| 635 | unsigned long base_kva; | ||
| 636 | dma_addr_t base_dma; | ||
| 637 | |||
| 638 | base_kva = (unsigned long)tsoh & PAGE_MASK; | ||
| 639 | base_dma = tsoh->dma_addr & PAGE_MASK; | ||
| 640 | |||
| 641 | p = &tx_queue->tso_headers_free; | ||
| 642 | while (*p != NULL) | ||
| 643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) | ||
| 644 | *p = (*p)->next; | ||
| 645 | else | ||
| 646 | p = &(*p)->next; | ||
| 647 | |||
| 648 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | ||
| 649 | } | ||
| 650 | |||
| 651 | static struct efx_tso_header * | ||
| 652 | efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | ||
| 653 | { | ||
| 654 | struct efx_tso_header *tsoh; | ||
| 655 | |||
| 656 | tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); | ||
| 657 | if (unlikely(!tsoh)) | ||
| 658 | return NULL; | ||
| 659 | |||
| 660 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | ||
| 661 | TSOH_BUFFER(tsoh), header_len, | ||
| 662 | PCI_DMA_TODEVICE); | ||
| 663 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | ||
| 664 | kfree(tsoh); | ||
| 665 | return NULL; | ||
| 666 | } | ||
| 667 | |||
| 668 | tsoh->unmap_len = header_len; | ||
| 669 | return tsoh; | ||
| 670 | } | ||
| 671 | |||
| 672 | static void | ||
| 673 | efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) | ||
| 674 | { | ||
| 675 | pci_unmap_single(tx_queue->efx->pci_dev, | ||
| 676 | tsoh->dma_addr, tsoh->unmap_len, | ||
| 677 | PCI_DMA_TODEVICE); | ||
| 678 | kfree(tsoh); | ||
| 679 | } | ||
| 680 | |||
| 681 | /** | ||
| 682 | * efx_tx_queue_insert - push descriptors onto the TX queue | ||
| 683 | * @tx_queue: Efx TX queue | ||
| 684 | * @dma_addr: DMA address of fragment | ||
| 685 | * @len: Length of fragment | ||
| 686 | * @skb: Only non-null for end of last segment | ||
| 687 | * @end_of_packet: True if last fragment in a packet | ||
| 688 | * @unmap_addr: DMA address of fragment for unmapping | ||
| 689 | * @unmap_len: Only set this in last segment of a fragment | ||
| 690 | * | ||
| 691 | * Push descriptors onto the TX queue. Return 0 on success or 1 if | ||
| 692 | * @tx_queue is full. | ||
| 693 | */ | ||
| 694 | static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | ||
| 695 | dma_addr_t dma_addr, unsigned len, | ||
| 696 | const struct sk_buff *skb, int end_of_packet, | ||
| 697 | dma_addr_t unmap_addr, unsigned unmap_len) | ||
| 698 | { | ||
| 699 | struct efx_tx_buffer *buffer; | ||
| 700 | struct efx_nic *efx = tx_queue->efx; | ||
| 701 | unsigned dma_len, fill_level, insert_ptr, misalign; | ||
| 702 | int q_space; | ||
| 703 | |||
| 704 | EFX_BUG_ON_PARANOID(len <= 0); | ||
| 705 | |||
| 706 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | ||
| 707 | /* -1 as there is no way to represent all descriptors used */ | ||
| 708 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | ||
| 709 | |||
| 710 | while (1) { | ||
| 711 | if (unlikely(q_space-- <= 0)) { | ||
| 712 | /* It might be that completions have happened | ||
| 713 | * since the xmit path last checked. Update | ||
| 714 | * the xmit path's copy of read_count. | ||
| 715 | */ | ||
| 716 | ++tx_queue->stopped; | ||
| 717 | /* This memory barrier protects the change of | ||
| 718 | * stopped from the access of read_count. */ | ||
| 719 | smp_mb(); | ||
| 720 | tx_queue->old_read_count = | ||
| 721 | *(volatile unsigned *)&tx_queue->read_count; | ||
| 722 | fill_level = (tx_queue->insert_count | ||
| 723 | - tx_queue->old_read_count); | ||
| 724 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | ||
| 725 | if (unlikely(q_space-- <= 0)) | ||
| 726 | return 1; | ||
| 727 | smp_mb(); | ||
| 728 | --tx_queue->stopped; | ||
| 729 | } | ||
| 730 | |||
| 731 | insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; | ||
| 732 | buffer = &tx_queue->buffer[insert_ptr]; | ||
| 733 | ++tx_queue->insert_count; | ||
| 734 | |||
| 735 | EFX_BUG_ON_PARANOID(tx_queue->insert_count - | ||
| 736 | tx_queue->read_count > | ||
| 737 | efx->type->txd_ring_mask); | ||
| 738 | |||
| 739 | efx_tsoh_free(tx_queue, buffer); | ||
| 740 | EFX_BUG_ON_PARANOID(buffer->len); | ||
| 741 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | ||
| 742 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
| 743 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | ||
| 744 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
| 745 | |||
| 746 | buffer->dma_addr = dma_addr; | ||
| 747 | |||
| 748 | /* Ensure we do not cross a boundary unsupported by H/W */ | ||
| 749 | dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; | ||
| 750 | |||
| 751 | misalign = (unsigned)dma_addr & efx->type->bug5391_mask; | ||
| 752 | if (misalign && dma_len + misalign > 512) | ||
| 753 | dma_len = 512 - misalign; | ||
| 754 | |||
| 755 | /* If there is enough space to send then do so */ | ||
| 756 | if (dma_len >= len) | ||
| 757 | break; | ||
| 758 | |||
| 759 | buffer->len = dma_len; /* Don't set the other members */ | ||
| 760 | dma_addr += dma_len; | ||
| 761 | len -= dma_len; | ||
| 762 | } | ||
| 763 | |||
| 764 | EFX_BUG_ON_PARANOID(!len); | ||
| 765 | buffer->len = len; | ||
| 766 | buffer->skb = skb; | ||
| 767 | buffer->continuation = !end_of_packet; | ||
| 768 | buffer->unmap_addr = unmap_addr; | ||
| 769 | buffer->unmap_len = unmap_len; | ||
| 770 | return 0; | ||
| 771 | } | ||
| 772 | |||
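The `dma_len = (~dma_addr & tx_dma_mask) + 1` clamp above computes the bytes remaining before the next hardware boundary. A standalone check, assuming a hypothetical 4KB descriptor limit (`tx_dma_mask == 0xfff`):

```c
#include <stdio.h>

int main(void)
{
	unsigned long long dma_addr = 0x12345f40ULL;	/* example address */
	unsigned int tx_dma_mask = 0xfff;		/* assumed 4KB limit */
	unsigned int dma_len = (~dma_addr & tx_dma_mask) + 1;

	/* 0x1000 - 0xf40 = 0xc0 = 192 bytes before the 4KB boundary */
	printf("dma_len = %u\n", dma_len);		/* prints 192 */
	return 0;
}
```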
| 773 | |||
| 774 | /* | ||
| 775 | * Put a TSO header into the TX queue. | ||
| 776 | * | ||
| 777 | * This is special-cased because we know that it is small enough to fit in | ||
| 778 | * a single fragment, and we know it doesn't cross a page boundary. It | ||
| 779 | * also allows us to not worry about end-of-packet etc. | ||
| 780 | */ | ||
| 781 | static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, | ||
| 782 | struct efx_tso_header *tsoh, unsigned len) | ||
| 783 | { | ||
| 784 | struct efx_tx_buffer *buffer; | ||
| 785 | |||
| 786 | buffer = &tx_queue->buffer[tx_queue->insert_count & | ||
| 787 | tx_queue->efx->type->txd_ring_mask]; | ||
| 788 | efx_tsoh_free(tx_queue, buffer); | ||
| 789 | EFX_BUG_ON_PARANOID(buffer->len); | ||
| 790 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | ||
| 791 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
| 792 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | ||
| 793 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
| 794 | buffer->len = len; | ||
| 795 | buffer->dma_addr = tsoh->dma_addr; | ||
| 796 | buffer->tsoh = tsoh; | ||
| 797 | |||
| 798 | ++tx_queue->insert_count; | ||
| 799 | } | ||
| 800 | |||
| 801 | |||
| 802 | /* Remove descriptors put into a tx_queue. */ | ||
| 803 | static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | ||
| 804 | { | ||
| 805 | struct efx_tx_buffer *buffer; | ||
| 806 | |||
| 807 | /* Work backwards until we hit the original insert pointer value */ | ||
| 808 | while (tx_queue->insert_count != tx_queue->write_count) { | ||
| 809 | --tx_queue->insert_count; | ||
| 810 | buffer = &tx_queue->buffer[tx_queue->insert_count & | ||
| 811 | tx_queue->efx->type->txd_ring_mask]; | ||
| 812 | efx_tsoh_free(tx_queue, buffer); | ||
| 813 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
| 814 | buffer->len = 0; | ||
| 815 | buffer->continuation = 1; | ||
| 816 | if (buffer->unmap_len) { | ||
| 817 | pci_unmap_page(tx_queue->efx->pci_dev, | ||
| 818 | buffer->unmap_addr, | ||
| 819 | buffer->unmap_len, PCI_DMA_TODEVICE); | ||
| 820 | buffer->unmap_len = 0; | ||
| 821 | } | ||
| 822 | } | ||
| 823 | } | ||
| 824 | |||
| 825 | |||
| 826 | /* Parse the SKB header and initialise state. */ | ||
| 827 | static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) | ||
| 828 | { | ||
| 829 | /* The combined ethernet/IP/TCP header size is the TCP header size | ||
| 830 | * plus the offset of the TCP header from the start of the packet. | ||
| 831 | */ | ||
| 832 | st->p.header_length = ((tcp_hdr(skb)->doff << 2u) | ||
| 833 | + PTR_DIFF(tcp_hdr(skb), skb->data)); | ||
| 834 | st->p.full_packet_size = (st->p.header_length | ||
| 835 | + skb_shinfo(skb)->gso_size); | ||
| 836 | |||
| 837 | st->p.ipv4_id = ntohs(ip_hdr(skb)->id); | ||
| 838 | st->seqnum = ntohl(tcp_hdr(skb)->seq); | ||
| 839 | |||
| 840 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); | ||
| 841 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); | ||
| 842 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); | ||
| 843 | |||
| 844 | st->packet_space = st->p.full_packet_size; | ||
| 845 | st->remaining_len = skb->len - st->p.header_length; | ||
| 846 | } | ||
| 847 | |||
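A worked instance of the arithmetic above for a plain TCP/IPv4 skb with no IP or TCP options and `gso_size == 1448` (a common MSS):

```c
/*
 * tcp_hdr(skb)->doff == 5, and the TCP header starts 34 bytes in
 * (14-byte Ethernet + 20-byte IP), so:
 *
 *   header_length    = (5 << 2) + 34 = 54
 *   full_packet_size = 54 + 1448    = 1502 bytes per emitted segment
 *   remaining_len    = skb->len - 54  (payload still to be segmented)
 */
```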
| 848 | |||
| 849 | /** | ||
| 850 | * tso_get_fragment - record fragment details and map for DMA | ||
| 851 | * @st: TSO state | ||
| 852 | * @efx: Efx NIC | ||
| 853 | * @len: Length of fragment | ||
| 854 | * @page: Page containing fragment data | ||
| 855 | * @page_off: Offset of fragment data within @page | ||
| 855 | * | ||
| 856 | * Record fragment details and map for DMA. Return 0 on success, or | ||
| 857 | * -%ENOMEM if DMA mapping fails. | ||
| 858 | */ | ||
| 859 | static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | ||
| 860 | int len, struct page *page, int page_off) | ||
| 861 | { | ||
| 863 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | ||
| 864 | len, PCI_DMA_TODEVICE); | ||
| 865 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | ||
| 866 | st->ifc.unmap_len = len; | ||
| 867 | st->ifc.len = len; | ||
| 868 | st->ifc.dma_addr = st->ifc.unmap_addr; | ||
| 869 | st->ifc.page = page; | ||
| 870 | st->ifc.page_off = page_off; | ||
| 871 | return 0; | ||
| 872 | } | ||
| 873 | return -ENOMEM; | ||
| 874 | } | ||
| 875 | |||
| 876 | |||
| 877 | /** | ||
| 878 | * tso_fill_packet_with_fragment - form descriptors for the current fragment | ||
| 879 | * @tx_queue: Efx TX queue | ||
| 880 | * @skb: Socket buffer | ||
| 881 | * @st: TSO state | ||
| 882 | * | ||
| 883 | * Form descriptors for the current fragment, until we reach the end | ||
| 884 | * of fragment or end-of-packet. Return 0 on success, 1 if not enough | ||
| 885 | * space in @tx_queue. | ||
| 886 | */ | ||
| 887 | static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, | ||
| 888 | const struct sk_buff *skb, | ||
| 889 | struct tso_state *st) | ||
| 890 | { | ||
| 892 | int n, end_of_packet, rc; | ||
| 893 | |||
| 894 | if (st->ifc.len == 0) | ||
| 895 | return 0; | ||
| 896 | if (st->packet_space == 0) | ||
| 897 | return 0; | ||
| 898 | |||
| 899 | EFX_BUG_ON_PARANOID(st->ifc.len <= 0); | ||
| 900 | EFX_BUG_ON_PARANOID(st->packet_space <= 0); | ||
| 901 | |||
| 902 | n = min(st->ifc.len, st->packet_space); | ||
| 903 | |||
| 904 | st->packet_space -= n; | ||
| 905 | st->remaining_len -= n; | ||
| 906 | st->ifc.len -= n; | ||
| 907 | st->ifc.page_off += n; | ||
| 908 | end_of_packet = st->remaining_len == 0 || st->packet_space == 0; | ||
| 909 | |||
| 910 | rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, | ||
| 911 | st->remaining_len ? NULL : skb, | ||
| 912 | end_of_packet, st->ifc.unmap_addr, | ||
| 913 | st->ifc.len ? 0 : st->ifc.unmap_len); | ||
| 914 | |||
| 915 | st->ifc.dma_addr += n; | ||
| 916 | |||
| 917 | return rc; | ||
| 918 | } | ||
| 919 | |||
| 920 | |||
| 921 | /** | ||
| 922 | * tso_start_new_packet - generate a new header and prepare for the new packet | ||
| 923 | * @tx_queue: Efx TX queue | ||
| 924 | * @skb: Socket buffer | ||
| 925 | * @st: TSO state | ||
| 926 | * | ||
| 927 | * Generate a new header and prepare for the new packet. Return 0 on | ||
| 928 | * success, or -1 if failed to alloc header. | ||
| 929 | */ | ||
| 930 | static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, | ||
| 931 | const struct sk_buff *skb, | ||
| 932 | struct tso_state *st) | ||
| 933 | { | ||
| 934 | struct efx_tso_header *tsoh; | ||
| 935 | struct iphdr *tsoh_iph; | ||
| 936 | struct tcphdr *tsoh_th; | ||
| 937 | unsigned ip_length; | ||
| 938 | u8 *header; | ||
| 939 | |||
| 940 | /* Allocate a DMA-mapped header buffer. */ | ||
| 941 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { | ||
| 942 | if (tx_queue->tso_headers_free == NULL) | ||
| 943 | if (efx_tsoh_block_alloc(tx_queue)) | ||
| 944 | return -1; | ||
| 945 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); | ||
| 946 | tsoh = tx_queue->tso_headers_free; | ||
| 947 | tx_queue->tso_headers_free = tsoh->next; | ||
| 948 | tsoh->unmap_len = 0; | ||
| 949 | } else { | ||
| 950 | tx_queue->tso_long_headers++; | ||
| 951 | tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); | ||
| 952 | if (unlikely(!tsoh)) | ||
| 953 | return -1; | ||
| 954 | } | ||
| 955 | |||
| 956 | header = TSOH_BUFFER(tsoh); | ||
| 957 | tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); | ||
| 958 | tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); | ||
| 959 | |||
| 960 | /* Copy and update the headers. */ | ||
| 961 | memcpy(header, skb->data, st->p.header_length); | ||
| 962 | |||
| 963 | tsoh_th->seq = htonl(st->seqnum); | ||
| 964 | st->seqnum += skb_shinfo(skb)->gso_size; | ||
| 965 | if (st->remaining_len > skb_shinfo(skb)->gso_size) { | ||
| 966 | /* This packet will not finish the TSO burst. */ | ||
| 967 | ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); | ||
| 968 | tsoh_th->fin = 0; | ||
| 969 | tsoh_th->psh = 0; | ||
| 970 | } else { | ||
| 971 | /* This packet will be the last in the TSO burst. */ | ||
| 972 | ip_length = (st->p.header_length - ETH_HDR_LEN(skb) | ||
| 973 | + st->remaining_len); | ||
| 974 | tsoh_th->fin = tcp_hdr(skb)->fin; | ||
| 975 | tsoh_th->psh = tcp_hdr(skb)->psh; | ||
| 976 | } | ||
| 977 | tsoh_iph->tot_len = htons(ip_length); | ||
| 978 | |||
| 979 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ | ||
| 980 | tsoh_iph->id = htons(st->p.ipv4_id); | ||
| 981 | st->p.ipv4_id++; | ||
| 982 | |||
| 983 | st->packet_space = skb_shinfo(skb)->gso_size; | ||
| 984 | ++tx_queue->tso_packets; | ||
| 985 | |||
| 986 | /* Form a descriptor for this header. */ | ||
| 987 | efx_tso_put_header(tx_queue, tsoh, st->p.header_length); | ||
| 988 | |||
| 989 | return 0; | ||
| 990 | } | ||
| 991 | |||
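Continuing the 1448-byte example, the per-segment header rewriting above produces the following progression for an skb carrying 4344 bytes (three full segments) of payload; S and N stand for the original TCP sequence number and IPv4 ID:

```c
/*
 *   segment 0: seq = S,        id = N,     tot_len = 1488, fin/psh 0
 *   segment 1: seq = S + 1448, id = N + 1, tot_len = 1488, fin/psh 0
 *   segment 2: seq = S + 2896, id = N + 2, tot_len = 1488,
 *              fin/psh copied from the original header
 *
 * tot_len = full_packet_size - ETH_HDR_LEN = 1502 - 14 = 1488 for the
 * non-final segments, and header_length - ETH_HDR_LEN + remaining_len
 * = 40 + 1448 = 1488 for the final one.
 */
```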
| 992 | |||
| 993 | /** | ||
| 994 | * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer | ||
| 995 | * @tx_queue: Efx TX queue | ||
| 996 | * @skb: Socket buffer | ||
| 997 | * | ||
| 998 | * Context: You must hold netif_tx_lock() to call this function. | ||
| 999 | * | ||
| 1000 | * Add socket buffer @skb to @tx_queue, performing TSO. @skb is | ||
| 1001 | * consumed in all cases, whether or not it was enqueued. Return | ||
| 1002 | * %NETDEV_TX_OK or %NETDEV_TX_BUSY. | ||
| 1003 | */ | ||
| 1004 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | ||
| 1005 | const struct sk_buff *skb) | ||
| 1006 | { | ||
| 1007 | int frag_i, rc, rc2 = NETDEV_TX_OK; | ||
| 1008 | struct tso_state state; | ||
| 1009 | skb_frag_t *f; | ||
| 1010 | |||
| 1011 | /* Verify TSO is safe - these checks should never fail. */ | ||
| 1012 | efx_tso_check_safe(skb); | ||
| 1013 | |||
| 1014 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | ||
| 1015 | |||
| 1016 | tso_start(&state, skb); | ||
| 1017 | |||
| 1018 | /* Assume that skb header area contains exactly the headers, and | ||
| 1019 | * all payload is in the frag list. | ||
| 1020 | */ | ||
| 1021 | if (skb_headlen(skb) == state.p.header_length) { | ||
| 1022 | /* Grab the first payload fragment. */ | ||
| 1023 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); | ||
| 1024 | frag_i = 0; | ||
| 1025 | f = &skb_shinfo(skb)->frags[frag_i]; | ||
| 1026 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
| 1027 | f->size, f->page, f->page_offset); | ||
| 1028 | if (rc) | ||
| 1029 | goto mem_err; | ||
| 1030 | } else { | ||
| 1031 | /* It may look like this code fragment assumes that the | ||
| 1032 | * skb->data portion does not cross a page boundary, but | ||
| 1033 | * that is not the case. It is guaranteed to be direct | ||
| 1034 | * mapped memory, and therefore is physically contiguous, | ||
| 1035 | * and so DMA will work fine. kmap_atomic() on this region | ||
| 1036 | * will just return the direct mapping, so that will work | ||
| 1037 | * too. | ||
| 1038 | */ | ||
| 1039 | int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); | ||
| 1040 | int hl = state.p.header_length; | ||
| 1041 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
| 1042 | skb_headlen(skb) - hl, | ||
| 1043 | virt_to_page(skb->data), page_off + hl); | ||
| 1044 | if (rc) | ||
| 1045 | goto mem_err; | ||
| 1046 | frag_i = -1; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | if (tso_start_new_packet(tx_queue, skb, &state) < 0) | ||
| 1050 | goto mem_err; | ||
| 1051 | |||
| 1052 | while (1) { | ||
| 1053 | rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); | ||
| 1054 | if (unlikely(rc)) | ||
| 1055 | goto stop; | ||
| 1056 | |||
| 1057 | /* Move onto the next fragment? */ | ||
| 1058 | if (state.ifc.len == 0) { | ||
| 1059 | if (++frag_i >= skb_shinfo(skb)->nr_frags) | ||
| 1060 | /* End of payload reached. */ | ||
| 1061 | break; | ||
| 1062 | f = &skb_shinfo(skb)->frags[frag_i]; | ||
| 1063 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
| 1064 | f->size, f->page, f->page_offset); | ||
| 1065 | if (rc) | ||
| 1066 | goto mem_err; | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | /* Start at new packet? */ | ||
| 1070 | if (state.packet_space == 0 && | ||
| 1071 | tso_start_new_packet(tx_queue, skb, &state) < 0) | ||
| 1072 | goto mem_err; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | /* Pass off to hardware */ | ||
| 1076 | falcon_push_buffers(tx_queue); | ||
| 1077 | |||
| 1078 | tx_queue->tso_bursts++; | ||
| 1079 | return NETDEV_TX_OK; | ||
| 1080 | |||
| 1081 | mem_err: | ||
| 1082 | EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" | ||
| 1083 | " error\n"); | ||
| 1084 | dev_kfree_skb_any((struct sk_buff *)skb); | ||
| 1085 | goto unwind; | ||
| 1086 | |||
| 1087 | stop: | ||
| 1088 | rc2 = NETDEV_TX_BUSY; | ||
| 1089 | |||
| 1090 | /* Stop the queue if it wasn't stopped before. */ | ||
| 1091 | if (tx_queue->stopped == 1) | ||
| 1092 | efx_stop_queue(tx_queue->efx); | ||
| 1093 | |||
| 1094 | unwind: | ||
| 1095 | efx_enqueue_unwind(tx_queue); | ||
| 1096 | return rc2; | ||
| 1097 | } | ||
| 1098 | |||
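The control flow of `efx_enqueue_skb_tso()` reduces to two cursors, the input-fragment cursor and the per-packet space counter; a compact restatement of the loop above:

```c
/*
 *   while (payload remains) {
 *           n = min(bytes left in fragment, space left in packet);
 *           push a descriptor for n bytes;
 *           if (fragment exhausted)
 *                   map the next skb fragment;
 *           if (packet space exhausted)
 *                   emit a fresh header (tso_start_new_packet);
 *   }
 *
 * so header and payload descriptors interleave in ring order:
 * H0 P P H1 P P ... Hn P.
 */
```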
| 1099 | |||
| 1100 | /* | ||
| 1101 | * Free up all TSO data structures associated with tx_queue. This | ||
| 1102 | * routine should be called only once the tx_queue is empty and | ||
| 1103 | * will no longer be used. | ||
| 1104 | */ | ||
| 1105 | static void efx_fini_tso(struct efx_tx_queue *tx_queue) | ||
| 1106 | { | ||
| 1107 | unsigned i; | ||
| 1108 | |||
| 1109 | if (tx_queue->buffer) | ||
| 1110 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | ||
| 1111 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | ||
| 1112 | |||
| 1113 | while (tx_queue->tso_headers_free != NULL) | ||
| 1114 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | ||
| 1115 | tx_queue->efx->pci_dev); | ||
| 1116 | } | ||
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 66dd5bf1eaa9..3b9f9ddbc372 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c | |||
| @@ -24,6 +24,10 @@ | |||
| 24 | MDIO_MMDREG_DEVS0_PMAPMD | \ | 24 | MDIO_MMDREG_DEVS0_PMAPMD | \ |
| 25 | MDIO_MMDREG_DEVS0_PHYXS) | 25 | MDIO_MMDREG_DEVS0_PHYXS) |
| 26 | 26 | ||
| 27 | #define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
| 28 | (1 << LOOPBACK_PMAPMD) | \ | ||
| 29 | (1 << LOOPBACK_NETWORK)) | ||
| 30 | |||
| 27 | /****************************************************************************/ | 31 | /****************************************************************************/ |
| 28 | /* Quake-specific MDIO registers */ | 32 | /* Quake-specific MDIO registers */ |
| 29 | #define MDIO_QUAKE_LED0_REG (0xD006) | 33 | #define MDIO_QUAKE_LED0_REG (0xD006) |
| @@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode) | |||
| 35 | mode); | 39 | mode); |
| 36 | } | 40 | } |
| 37 | 41 | ||
| 42 | struct xfp_phy_data { | ||
| 43 | int tx_disabled; | ||
| 44 | }; | ||
| 45 | |||
| 38 | #define XFP_MAX_RESET_TIME 500 | 46 | #define XFP_MAX_RESET_TIME 500 |
| 39 | #define XFP_RESET_WAIT 10 | 47 | #define XFP_RESET_WAIT 10 |
| 40 | 48 | ||
| @@ -72,18 +80,31 @@ static int xfp_reset_phy(struct efx_nic *efx) | |||
| 72 | 80 | ||
| 73 | static int xfp_phy_init(struct efx_nic *efx) | 81 | static int xfp_phy_init(struct efx_nic *efx) |
| 74 | { | 82 | { |
| 83 | struct xfp_phy_data *phy_data; | ||
| 75 | u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); | 84 | u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); |
| 76 | int rc; | 85 | int rc; |
| 77 | 86 | ||
| 87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | ||
| 88 | efx->phy_data = (void *) phy_data; | ||
| 89 | |||
| 78 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | 90 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" |
| 79 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | 91 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), |
| 80 | MDIO_ID_REV(devid)); | 92 | MDIO_ID_REV(devid)); |
| 81 | 93 | ||
| 94 | phy_data->tx_disabled = efx->tx_disabled; | ||
| 95 | |||
| 82 | rc = xfp_reset_phy(efx); | 96 | rc = xfp_reset_phy(efx); |
| 83 | 97 | ||
| 84 | EFX_INFO(efx, "XFP: PHY init %s.\n", | 98 | EFX_INFO(efx, "XFP: PHY init %s.\n", |
| 85 | rc ? "failed" : "successful"); | 99 | rc ? "failed" : "successful"); |
| 100 | if (rc < 0) | ||
| 101 | goto fail; | ||
| 86 | 102 | ||
| 103 | return 0; | ||
| 104 | |||
| 105 | fail: | ||
| 106 | kfree(efx->phy_data); | ||
| 107 | efx->phy_data = NULL; | ||
| 87 | return rc; | 108 | return rc; |
| 88 | } | 109 | } |
| 89 | 110 | ||
| @@ -110,6 +131,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx) | |||
| 110 | 131 | ||
| 111 | static void xfp_phy_reconfigure(struct efx_nic *efx) | 132 | static void xfp_phy_reconfigure(struct efx_nic *efx) |
| 112 | { | 133 | { |
| 134 | struct xfp_phy_data *phy_data = efx->phy_data; | ||
| 135 | |||
| 136 | /* Reset the PHY when moving from tx off to tx on */ | ||
| 137 | if (phy_data->tx_disabled && !efx->tx_disabled) | ||
| 138 | xfp_reset_phy(efx); | ||
| 139 | |||
| 140 | mdio_clause45_transmit_disable(efx); | ||
| 141 | mdio_clause45_phy_reconfigure(efx); | ||
| 142 | |||
| 143 | phy_data->tx_disabled = efx->tx_disabled; | ||
| 113 | efx->link_up = xfp_link_ok(efx); | 144 | efx->link_up = xfp_link_ok(efx); |
| 114 | efx->link_options = GM_LPA_10000FULL; | 145 | efx->link_options = GM_LPA_10000FULL; |
| 115 | } | 146 | } |
| @@ -119,6 +150,10 @@ static void xfp_phy_fini(struct efx_nic *efx) | |||
| 119 | { | 150 | { |
| 120 | /* Clobber the LED if it was blinking */ | 151 | /* Clobber the LED if it was blinking */ |
| 121 | efx->board_info.blink(efx, 0); | 152 | efx->board_info.blink(efx, 0); |
| 153 | |||
| 154 | /* Free the context block */ | ||
| 155 | kfree(efx->phy_data); | ||
| 156 | efx->phy_data = NULL; | ||
| 122 | } | 157 | } |
| 123 | 158 | ||
| 124 | struct efx_phy_operations falcon_xfp_phy_ops = { | 159 | struct efx_phy_operations falcon_xfp_phy_ops = { |
| @@ -129,4 +164,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = { | |||
| 129 | .clear_interrupt = xfp_phy_clear_interrupt, | 164 | .clear_interrupt = xfp_phy_clear_interrupt, |
| 130 | .reset_xaui = efx_port_dummy_op_void, | 165 | .reset_xaui = efx_port_dummy_op_void, |
| 131 | .mmds = XFP_REQUIRED_DEVS, | 166 | .mmds = XFP_REQUIRED_DEVS, |
| 167 | .loopbacks = XFP_LOOPBACKS, | ||
| 132 | }; | 168 | }; |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 7bb3ba9bcbd8..c0a5eea20007 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
| @@ -1966,13 +1966,13 @@ struct sky2_status_le { | |||
| 1966 | struct tx_ring_info { | 1966 | struct tx_ring_info { |
| 1967 | struct sk_buff *skb; | 1967 | struct sk_buff *skb; |
| 1968 | DECLARE_PCI_UNMAP_ADDR(mapaddr); | 1968 | DECLARE_PCI_UNMAP_ADDR(mapaddr); |
| 1969 | DECLARE_PCI_UNMAP_ADDR(maplen); | 1969 | DECLARE_PCI_UNMAP_LEN(maplen); |
| 1970 | }; | 1970 | }; |
| 1971 | 1971 | ||
| 1972 | struct rx_ring_info { | 1972 | struct rx_ring_info { |
| 1973 | struct sk_buff *skb; | 1973 | struct sk_buff *skb; |
| 1974 | dma_addr_t data_addr; | 1974 | dma_addr_t data_addr; |
| 1975 | DECLARE_PCI_UNMAP_ADDR(data_size); | 1975 | DECLARE_PCI_UNMAP_LEN(data_size); |
| 1976 | dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; | 1976 | dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; |
| 1977 | }; | 1977 | }; |
| 1978 | 1978 | ||
