author    Jiri Kosina <jkosina@suse.cz>    2011-02-15 04:24:31 -0500
committer Jiri Kosina <jkosina@suse.cz>    2011-02-15 04:24:31 -0500
commit    0a9d59a2461477bd9ed143c01af9df3f8f00fa81 (patch)
tree      df997d1cfb0786427a0df1fbd6f0640fa4248cf4 /drivers/net
parent    a23ce6da9677d245aa0aadc99f4197030350ab54 (diff)
parent    795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/arm/ks8695net.c | 290
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 5
-rw-r--r--  drivers/net/benet/be_main.c | 4
-rw-r--r--  drivers/net/bfin_mac.c | 9
-rw-r--r--  drivers/net/bna/bnad_ethtool.c | 1
-rw-r--r--  drivers/net/bnx2.c | 21
-rw-r--r--  drivers/net/bnx2.h | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 234
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 59
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 4
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 4
-rw-r--r--  drivers/net/bonding/bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/can/Kconfig | 4
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/at91_can.c | 138
-rw-r--r--  drivers/net/can/janz-ican3.c | 2
-rw-r--r--  drivers/net/can/pch_can.c | 5
-rw-r--r--  drivers/net/can/softing/Kconfig | 30
-rw-r--r--  drivers/net/can/softing/Makefile | 6
-rw-r--r--  drivers/net/can/softing/softing.h | 167
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 360
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 691
-rw-r--r--  drivers/net/can/softing/softing_main.c | 893
-rw-r--r--  drivers/net/can/softing/softing_platform.h | 40
-rw-r--r--  drivers/net/cassini.c | 6
-rw-r--r--  drivers/net/cnic.c | 12
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/depca.c | 6
-rw-r--r--  drivers/net/dl2k.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 1
-rw-r--r--  drivers/net/e1000/e1000_main.c | 10
-rw-r--r--  drivers/net/e1000e/82571.c | 4
-rw-r--r--  drivers/net/e1000e/Makefile | 2
-rw-r--r--  drivers/net/e1000e/defines.h | 2
-rw-r--r--  drivers/net/e1000e/e1000.h | 2
-rw-r--r--  drivers/net/e1000e/es2lan.c | 2
-rw-r--r--  drivers/net/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/e1000e/hw.h | 4
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 2
-rw-r--r--  drivers/net/e1000e/lib.c | 20
-rw-r--r--  drivers/net/e1000e/netdev.c | 224
-rw-r--r--  drivers/net/e1000e/param.c | 6
-rw-r--r--  drivers/net/e1000e/phy.c | 4
-rw-r--r--  drivers/net/enc28j60.c | 2
-rw-r--r--  drivers/net/gianfar.c | 12
-rw-r--r--  drivers/net/gianfar.h | 10
-rw-r--r--  drivers/net/greth.c | 221
-rw-r--r--  drivers/net/greth.h | 2
-rw-r--r--  drivers/net/irda/sh_irda.c | 14
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 39
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c | 6
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/mlx4/catas.c | 6
-rw-r--r--  drivers/net/mlx4/en_main.c | 3
-rw-r--r--  drivers/net/mlx4/main.c | 17
-rw-r--r--  drivers/net/mlx4/mcg.c | 23
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/niu.c | 61
-rw-r--r--  drivers/net/ns83820.c | 5
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 14
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 6
-rw-r--r--  drivers/net/r8169.c | 84
-rw-r--r--  drivers/net/sfc/efx.c | 18
-rw-r--r--  drivers/net/sfc/falcon.c | 25
-rw-r--r--  drivers/net/sfc/net_driver.h | 10
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/tg3.c | 95
-rw-r--r--  drivers/net/tg3.h | 3
-rw-r--r--  drivers/net/tile/tilepro.c | 10
-rw-r--r--  drivers/net/ucc_geth.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 246
-rw-r--r--  drivers/net/usb/kaweth.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 27
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 93
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 274
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/vxge/vxge-config.c | 2
-rw-r--r--  drivers/net/vxge/vxge-main.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/dma.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/pcu.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 58
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 9
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 15
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/netdev.c | 2
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00firmware.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 40
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 11
-rw-r--r--  drivers/net/wireless/wl1251/main.c | 3
-rw-r--r--  drivers/net/wireless/wl12xx/spi.c | 3
-rw-r--r--  drivers/net/xen-netfront.c | 96
122 files changed, 3859 insertions, 1183 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4c8bfc97fb4c..03823327db25 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2864,7 +2864,7 @@ config MLX4_CORE
 	default n
 
 config MLX4_DEBUG
-	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
 	depends on MLX4_CORE
 	default y
 	---help---
@@ -3389,8 +3389,7 @@ config NETCONSOLE
 
 config NETCONSOLE_DYNAMIC
 	bool "Dynamic reconfiguration of logging targets"
-	depends on NETCONSOLE && SYSFS
-	select CONFIGFS_FS
+	depends on NETCONSOLE && SYSFS && CONFIGFS_FS
 	help
 	  This option enables the ability to dynamically reconfigure target
 	  parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
854} 854}
855 855
856/** 856/**
857 * ks8695_get_settings - Get device-specific settings. 857 * ks8695_wan_get_settings - Get device-specific settings.
858 * @ndev: The network device to read settings from 858 * @ndev: The network device to read settings from
859 * @cmd: The ethtool structure to read into 859 * @cmd: The ethtool structure to read into
860 */ 860 */
861static int 861static int
862ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 862ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
863{ 863{
864 struct ks8695_priv *ksp = netdev_priv(ndev); 864 struct ks8695_priv *ksp = netdev_priv(ndev);
865 u32 ctrl; 865 u32 ctrl;
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
870 SUPPORTED_TP | SUPPORTED_MII); 870 SUPPORTED_TP | SUPPORTED_MII);
871 cmd->transceiver = XCVR_INTERNAL; 871 cmd->transceiver = XCVR_INTERNAL;
872 872
873 /* Port specific extras */ 873 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
874 switch (ksp->dtype) { 874 cmd->port = PORT_MII;
875 case KS8695_DTYPE_HPNA: 875 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
876 cmd->phy_address = 0; 876 cmd->phy_address = 0;
877 /* not supported for HPNA */
878 cmd->autoneg = AUTONEG_DISABLE;
879 877
880 /* BUG: Erm, dtype hpna implies no phy regs */ 878 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
881 /* 879 if ((ctrl & WMC_WAND) == 0) {
882 ctrl = readl(KS8695_MISC_VA + KS8695_HMC); 880 /* auto-negotiation is enabled */
883 cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; 881 cmd->advertising |= ADVERTISED_Autoneg;
884 cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF; 882 if (ctrl & WMC_WANA100F)
885 */ 883 cmd->advertising |= ADVERTISED_100baseT_Full;
886 return -EOPNOTSUPP; 884 if (ctrl & WMC_WANA100H)
887 case KS8695_DTYPE_WAN: 885 cmd->advertising |= ADVERTISED_100baseT_Half;
888 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; 886 if (ctrl & WMC_WANA10F)
889 cmd->port = PORT_MII; 887 cmd->advertising |= ADVERTISED_10baseT_Full;
890 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); 888 if (ctrl & WMC_WANA10H)
891 cmd->phy_address = 0; 889 cmd->advertising |= ADVERTISED_10baseT_Half;
890 if (ctrl & WMC_WANAP)
891 cmd->advertising |= ADVERTISED_Pause;
892 cmd->autoneg = AUTONEG_ENABLE;
893
894 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
895 cmd->duplex = (ctrl & WMC_WDS) ?
896 DUPLEX_FULL : DUPLEX_HALF;
897 } else {
898 /* auto-negotiation is disabled */
899 cmd->autoneg = AUTONEG_DISABLE;
892 900
893 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); 901 cmd->speed = (ctrl & WMC_WANF100) ?
894 if ((ctrl & WMC_WAND) == 0) { 902 SPEED_100 : SPEED_10;
895 /* auto-negotiation is enabled */ 903 cmd->duplex = (ctrl & WMC_WANFF) ?
896 cmd->advertising |= ADVERTISED_Autoneg; 904 DUPLEX_FULL : DUPLEX_HALF;
897 if (ctrl & WMC_WANA100F)
898 cmd->advertising |= ADVERTISED_100baseT_Full;
899 if (ctrl & WMC_WANA100H)
900 cmd->advertising |= ADVERTISED_100baseT_Half;
901 if (ctrl & WMC_WANA10F)
902 cmd->advertising |= ADVERTISED_10baseT_Full;
903 if (ctrl & WMC_WANA10H)
904 cmd->advertising |= ADVERTISED_10baseT_Half;
905 if (ctrl & WMC_WANAP)
906 cmd->advertising |= ADVERTISED_Pause;
907 cmd->autoneg = AUTONEG_ENABLE;
908
909 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
910 cmd->duplex = (ctrl & WMC_WDS) ?
911 DUPLEX_FULL : DUPLEX_HALF;
912 } else {
913 /* auto-negotiation is disabled */
914 cmd->autoneg = AUTONEG_DISABLE;
915
916 cmd->speed = (ctrl & WMC_WANF100) ?
917 SPEED_100 : SPEED_10;
918 cmd->duplex = (ctrl & WMC_WANFF) ?
919 DUPLEX_FULL : DUPLEX_HALF;
920 }
921 break;
922 case KS8695_DTYPE_LAN:
923 return -EOPNOTSUPP;
924 } 905 }
925 906
926 return 0; 907 return 0;
927} 908}
928 909
929/** 910/**
930 * ks8695_set_settings - Set device-specific settings. 911 * ks8695_wan_set_settings - Set device-specific settings.
931 * @ndev: The network device to configure 912 * @ndev: The network device to configure
932 * @cmd: The settings to configure 913 * @cmd: The settings to configure
933 */ 914 */
934static int 915static int
935ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 916ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
936{ 917{
937 struct ks8695_priv *ksp = netdev_priv(ndev); 918 struct ks8695_priv *ksp = netdev_priv(ndev);
938 u32 ctrl; 919 u32 ctrl;
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
956 ADVERTISED_100baseT_Full)) == 0) 937 ADVERTISED_100baseT_Full)) == 0)
957 return -EINVAL; 938 return -EINVAL;
958 939
959 switch (ksp->dtype) { 940 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
960 case KS8695_DTYPE_HPNA:
961 /* HPNA does not support auto-negotiation. */
962 return -EINVAL;
963 case KS8695_DTYPE_WAN:
964 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
965
966 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
967 WMC_WANA10F | WMC_WANA10H);
968 if (cmd->advertising & ADVERTISED_100baseT_Full)
969 ctrl |= WMC_WANA100F;
970 if (cmd->advertising & ADVERTISED_100baseT_Half)
971 ctrl |= WMC_WANA100H;
972 if (cmd->advertising & ADVERTISED_10baseT_Full)
973 ctrl |= WMC_WANA10F;
974 if (cmd->advertising & ADVERTISED_10baseT_Half)
975 ctrl |= WMC_WANA10H;
976
977 /* force a re-negotiation */
978 ctrl |= WMC_WANR;
979 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
980 break;
981 case KS8695_DTYPE_LAN:
982 return -EOPNOTSUPP;
983 }
984 941
942 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
943 WMC_WANA10F | WMC_WANA10H);
944 if (cmd->advertising & ADVERTISED_100baseT_Full)
945 ctrl |= WMC_WANA100F;
946 if (cmd->advertising & ADVERTISED_100baseT_Half)
947 ctrl |= WMC_WANA100H;
948 if (cmd->advertising & ADVERTISED_10baseT_Full)
949 ctrl |= WMC_WANA10F;
950 if (cmd->advertising & ADVERTISED_10baseT_Half)
951 ctrl |= WMC_WANA10H;
952
953 /* force a re-negotiation */
954 ctrl |= WMC_WANR;
955 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
985 } else { 956 } else {
986 switch (ksp->dtype) { 957 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
987 case KS8695_DTYPE_HPNA: 958
988 /* BUG: dtype_hpna implies no phy registers */ 959 /* disable auto-negotiation */
989 /* 960 ctrl |= WMC_WAND;
990 ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); 961 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
991 962
992 ctrl &= ~(HMC_HSS | HMC_HDS); 963 if (cmd->speed == SPEED_100)
993 if (cmd->speed == SPEED_100) 964 ctrl |= WMC_WANF100;
994 ctrl |= HMC_HSS; 965 if (cmd->duplex == DUPLEX_FULL)
995 if (cmd->duplex == DUPLEX_FULL) 966 ctrl |= WMC_WANFF;
996 ctrl |= HMC_HDS; 967
997 968 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
998 __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
999 */
1000 return -EOPNOTSUPP;
1001 case KS8695_DTYPE_WAN:
1002 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1003
1004 /* disable auto-negotiation */
1005 ctrl |= WMC_WAND;
1006 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
1007
1008 if (cmd->speed == SPEED_100)
1009 ctrl |= WMC_WANF100;
1010 if (cmd->duplex == DUPLEX_FULL)
1011 ctrl |= WMC_WANFF;
1012
1013 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1014 break;
1015 case KS8695_DTYPE_LAN:
1016 return -EOPNOTSUPP;
1017 }
1018 } 969 }
1019 970
1020 return 0; 971 return 0;
1021} 972}
1022 973
1023/** 974/**
1024 * ks8695_nwayreset - Restart the autonegotiation on the port. 975 * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
1025 * @ndev: The network device to restart autoneotiation on 976 * @ndev: The network device to restart autoneotiation on
1026 */ 977 */
1027static int 978static int
1028ks8695_nwayreset(struct net_device *ndev) 979ks8695_wan_nwayreset(struct net_device *ndev)
1029{ 980{
1030 struct ks8695_priv *ksp = netdev_priv(ndev); 981 struct ks8695_priv *ksp = netdev_priv(ndev);
1031 u32 ctrl; 982 u32 ctrl;
1032 983
1033 switch (ksp->dtype) { 984 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1034 case KS8695_DTYPE_HPNA:
1035 /* No phy means no autonegotiation on hpna */
1036 return -EINVAL;
1037 case KS8695_DTYPE_WAN:
1038 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1039
1040 if ((ctrl & WMC_WAND) == 0)
1041 writel(ctrl | WMC_WANR,
1042 ksp->phyiface_regs + KS8695_WMC);
1043 else
1044 /* auto-negotiation not enabled */
1045 return -EINVAL;
1046 break;
1047 case KS8695_DTYPE_LAN:
1048 return -EOPNOTSUPP;
1049 }
1050
1051 return 0;
1052}
1053 985
1054/** 986 if ((ctrl & WMC_WAND) == 0)
1055 * ks8695_get_link - Retrieve link status of network interface 987 writel(ctrl | WMC_WANR,
1056 * @ndev: The network interface to retrive the link status of. 988 ksp->phyiface_regs + KS8695_WMC);
1057 */ 989 else
1058static u32 990 /* auto-negotiation not enabled */
1059ks8695_get_link(struct net_device *ndev) 991 return -EINVAL;
1060{
1061 struct ks8695_priv *ksp = netdev_priv(ndev);
1062 u32 ctrl;
1063 992
1064 switch (ksp->dtype) {
1065 case KS8695_DTYPE_HPNA:
1066 /* HPNA always has link */
1067 return 1;
1068 case KS8695_DTYPE_WAN:
1069 /* WAN we can read the PHY for */
1070 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1071 return ctrl & WMC_WLS;
1072 case KS8695_DTYPE_LAN:
1073 return -EOPNOTSUPP;
1074 }
1075 return 0; 993 return 0;
1076} 994}
1077 995
1078/** 996/**
1079 * ks8695_get_pause - Retrieve network pause/flow-control advertising 997 * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
1080 * @ndev: The device to retrieve settings from 998 * @ndev: The device to retrieve settings from
1081 * @param: The structure to fill out with the information 999 * @param: The structure to fill out with the information
1082 */ 1000 */
1083static void 1001static void
1084ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) 1002ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1085{ 1003{
1086 struct ks8695_priv *ksp = netdev_priv(ndev); 1004 struct ks8695_priv *ksp = netdev_priv(ndev);
1087 u32 ctrl; 1005 u32 ctrl;
1088 1006
1089 switch (ksp->dtype) { 1007 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1090 case KS8695_DTYPE_HPNA:
1091 /* No phy link on hpna to configure */
1092 return;
1093 case KS8695_DTYPE_WAN:
1094 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1095
1096 /* advertise Pause */
1097 param->autoneg = (ctrl & WMC_WANAP);
1098 1008
1099 /* current Rx Flow-control */ 1009 /* advertise Pause */
1100 ctrl = ks8695_readreg(ksp, KS8695_DRXC); 1010 param->autoneg = (ctrl & WMC_WANAP);
1101 param->rx_pause = (ctrl & DRXC_RFCE);
1102 1011
1103 /* current Tx Flow-control */ 1012 /* current Rx Flow-control */
1104 ctrl = ks8695_readreg(ksp, KS8695_DTXC); 1013 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1105 param->tx_pause = (ctrl & DTXC_TFCE); 1014 param->rx_pause = (ctrl & DRXC_RFCE);
1106 break;
1107 case KS8695_DTYPE_LAN:
1108 /* The LAN's "phy" is a direct-attached switch */
1109 return;
1110 }
1111}
1112 1015
1113/** 1016 /* current Tx Flow-control */
1114 * ks8695_set_pause - Configure pause/flow-control 1017 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1115 * @ndev: The device to configure 1018 param->tx_pause = (ctrl & DTXC_TFCE);
1116 * @param: The pause parameters to set
1117 *
1118 * TODO: Implement this
1119 */
1120static int
1121ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1122{
1123 return -EOPNOTSUPP;
1124} 1019}
1125 1020
1126/** 1021/**
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1140static const struct ethtool_ops ks8695_ethtool_ops = { 1035static const struct ethtool_ops ks8695_ethtool_ops = {
1141 .get_msglevel = ks8695_get_msglevel, 1036 .get_msglevel = ks8695_get_msglevel,
1142 .set_msglevel = ks8695_set_msglevel, 1037 .set_msglevel = ks8695_set_msglevel,
1143 .get_settings = ks8695_get_settings, 1038 .get_drvinfo = ks8695_get_drvinfo,
1144 .set_settings = ks8695_set_settings, 1039};
1145 .nway_reset = ks8695_nwayreset, 1040
1146 .get_link = ks8695_get_link, 1041static const struct ethtool_ops ks8695_wan_ethtool_ops = {
1147 .get_pauseparam = ks8695_get_pause, 1042 .get_msglevel = ks8695_get_msglevel,
1148 .set_pauseparam = ks8695_set_pause, 1043 .set_msglevel = ks8695_set_msglevel,
1044 .get_settings = ks8695_wan_get_settings,
1045 .set_settings = ks8695_wan_set_settings,
1046 .nway_reset = ks8695_wan_nwayreset,
1047 .get_link = ethtool_op_get_link,
1048 .get_pauseparam = ks8695_wan_get_pause,
1149 .get_drvinfo = ks8695_get_drvinfo, 1049 .get_drvinfo = ks8695_get_drvinfo,
1150}; 1050};
1151 1051
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev)
1541 1441
1542 /* driver system setup */ 1442 /* driver system setup */
1543 ndev->netdev_ops = &ks8695_netdev_ops; 1443 ndev->netdev_ops = &ks8695_netdev_ops;
1544 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1545 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1444 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1546 1445
1547 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); 1446 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev)
1608 if (ksp->phyiface_regs && ksp->link_irq == -1) { 1507 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1609 ks8695_init_switch(ksp); 1508 ks8695_init_switch(ksp);
1610 ksp->dtype = KS8695_DTYPE_LAN; 1509 ksp->dtype = KS8695_DTYPE_LAN;
1510 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1611 } else if (ksp->phyiface_regs && ksp->link_irq != -1) { 1511 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1612 ks8695_init_wan_phy(ksp); 1512 ks8695_init_wan_phy(ksp);
1613 ksp->dtype = KS8695_DTYPE_WAN; 1513 ksp->dtype = KS8695_DTYPE_WAN;
1514 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
1614 } else { 1515 } else {
1615 /* No initialisation since HPNA does not have a PHY */ 1516 /* No initialisation since HPNA does not have a PHY */
1616 ksp->dtype = KS8695_DTYPE_HPNA; 1517 ksp->dtype = KS8695_DTYPE_HPNA;
1518 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1617 } 1519 }
1618 1520
1619 /* And bring up the net_device with the net core */ 1521 /* And bring up the net_device with the net core */
@@ -1742,7 +1644,7 @@ ks8695_cleanup(void)
1742module_init(ks8695_init); 1644module_init(ks8695_init);
1743module_exit(ks8695_cleanup); 1645module_exit(ks8695_cleanup);
1744 1646
1745MODULE_AUTHOR("Simtec Electronics") 1647MODULE_AUTHOR("Simtec Electronics");
1746MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); 1648MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1747MODULE_LICENSE("GPL"); 1649MODULE_LICENSE("GPL");
1748MODULE_ALIAS("platform:" MODULENAME); 1650MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..3824382faecc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
 	/* required last entry */
 	{ 0 }
 };
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..28a32a6c8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
 	if (adapter->link_up != link_up) {
 		adapter->link_speed = -1;
 		if (link_up) {
-			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
 			printk(KERN_INFO "%s: Link up\n", netdev->name);
 		} else {
-			netif_stop_queue(netdev);
 			netif_carrier_off(netdev);
 			printk(KERN_INFO "%s: Link down\n", netdev->name);
 		}
@@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
-
-	netif_stop_queue(netdev);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b9fc5173aef..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1284,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
 {
 	u32 emac_hashhi, emac_hashlo;
 	struct netdev_hw_addr *ha;
-	char *addrs;
 	u32 crc;
 
 	emac_hashhi = emac_hashlo = 0;
 
 	netdev_for_each_mc_addr(ha, dev) {
-		addrs = ha->addr;
-
-		/* skip non-multicast addresses */
-		if (!(*addrs & 1))
-			continue;
-
-		crc = ether_crc(ETH_ALEN, addrs);
+		crc = ether_crc(ETH_ALEN, ha->addr);
 		crc >>= 26;
 
 		if (crc & 0x20)
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae91991..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
-		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..0ba59d5aeb7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7553,6 +7553,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
 	    !(data & ETH_FLAG_RXVLAN))
 		return -EINVAL;
 
+	/* TSO with VLAN tag won't work with current firmware */
+	if (!(data & ETH_FLAG_TXVLAN))
+		return -EINVAL;
+
 	rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 				  ETH_FLAG_TXVLAN);
 	if (rc)
@@ -7962,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-				"failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8418,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8535,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..f459fb2f9add 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c9436..653c62475cb6 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.62.00-3"
-#define DRV_MODULE_RELDATE	"2010/12/21"
+#define DRV_MODULE_VERSION	"1.62.00-5"
+#define DRV_MODULE_RELDATE	"2011/01/30"
 #define BNX2X_BC_VER		0x040200
 
 #define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f63989..548f5631c0dc 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -352,6 +352,10 @@ struct port_hw_cfg { /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120	0x0000d8d8
 	/* forced only */
 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210	0x0000e4e4
+	/* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK	0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED	0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED	0x00010000
 
 	u32 external_phy_config;
 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK	0xff000000
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f391..dd1210fddfff 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1573,7 +1573,7 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1573 1573
1574 offset = phy->addr + ser_lane; 1574 offset = phy->addr + ser_lane;
1575 if (CHIP_IS_E2(bp)) 1575 if (CHIP_IS_E2(bp))
1576 aer_val = 0x2800 + offset - 1; 1576 aer_val = 0x3800 + offset - 1;
1577 else 1577 else
1578 aer_val = 0x3800 + offset; 1578 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1579 CL45_WR_OVER_CL22(bp, phy,
@@ -3166,7 +3166,23 @@ u8 bnx2x_set_led(struct link_params *params,
3166 if (!vars->link_up) 3166 if (!vars->link_up)
3167 break; 3167 break;
3168 case LED_MODE_ON: 3168 case LED_MODE_ON:
3169 if (SINGLE_MEDIA_DIRECT(params)) { 3169 if (params->phy[EXT_PHY1].type ==
3170 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3171 CHIP_IS_E2(bp) && params->num_phys == 2) {
3172 /**
3173 * This is a work-around for E2+8727 Configurations
3174 */
3175 if (mode == LED_MODE_ON ||
3176 speed == SPEED_10000){
3177 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3178 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3179
3180 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3181 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3182 (tmp | EMAC_LED_OVERRIDE));
3183 return rc;
3184 }
3185 } else if (SINGLE_MEDIA_DIRECT(params)) {
3170 /** 3186 /**
3171 * This is a work-around for HW issue found when link 3187 * This is a work-around for HW issue found when link
3172 * is up in CL73 3188 * is up in CL73
@@ -3854,11 +3870,14 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3854 pause_result); 3870 pause_result);
3855 } 3871 }
3856} 3872}
3857 3873static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3858static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3859 struct bnx2x_phy *phy, 3874 struct bnx2x_phy *phy,
3860 u8 port) 3875 u8 port)
3861{ 3876{
3877 u32 count = 0;
3878 u16 fw_ver1, fw_msgout;
3879 u8 rc = 0;
3880
3862 /* Boot port from external ROM */ 3881 /* Boot port from external ROM */
3863 /* EDC grst */ 3882 /* EDC grst */
3864 bnx2x_cl45_write(bp, phy, 3883 bnx2x_cl45_write(bp, phy,
@@ -3888,56 +3907,45 @@ static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3888 MDIO_PMA_REG_GEN_CTRL, 3907 MDIO_PMA_REG_GEN_CTRL,
3889 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3908 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3890 3909
3891 /* wait for 120ms for code download via SPI port */ 3910 /* Delay 100ms per the PHY specifications */
3892 msleep(120); 3911 msleep(100);
3912
3913 /* 8073 sometimes taking longer to download */
3914 do {
3915 count++;
3916 if (count > 300) {
3917 DP(NETIF_MSG_LINK,
3918 "bnx2x_8073_8727_external_rom_boot port %x:"
3919 "Download failed. fw version = 0x%x\n",
3920 port, fw_ver1);
3921 rc = -EINVAL;
3922 break;
3923 }
3924
3925 bnx2x_cl45_read(bp, phy,
3926 MDIO_PMA_DEVAD,
3927 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3928 bnx2x_cl45_read(bp, phy,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
3931
3932 msleep(1);
3933 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
3934 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
3935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
3893 3936
3894 /* Clear ser_boot_ctl bit */ 3937 /* Clear ser_boot_ctl bit */
3895 bnx2x_cl45_write(bp, phy, 3938 bnx2x_cl45_write(bp, phy,
3896 MDIO_PMA_DEVAD, 3939 MDIO_PMA_DEVAD,
3897 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3940 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3898 bnx2x_save_bcm_spirom_ver(bp, phy, port); 3941 bnx2x_save_bcm_spirom_ver(bp, phy, port);
3899}
3900 3942
3901static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, 3943 DP(NETIF_MSG_LINK,
3902 struct bnx2x_phy *phy) 3944 "bnx2x_8073_8727_external_rom_boot port %x:"
3903{ 3945 "Download complete. fw version = 0x%x\n",
3904 u16 val; 3946 port, fw_ver1);
3905 bnx2x_cl45_read(bp, phy,
3906 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
3907
3908 if (val == 0) {
3909 /* Mustn't set low power mode in 8073 A0 */
3910 return;
3911 }
3912
3913 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3914 bnx2x_cl45_read(bp, phy,
3915 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3916 val &= ~(1<<13);
3917 bnx2x_cl45_write(bp, phy,
3918 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3919
3920 /* PLL controls */
3921 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3922 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3923 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3924 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3925 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3926
3927 /* Tx Controls */
3928 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3929 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3930 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3931
3932 /* Rx Controls */
3933 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3934 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
3935 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
3936 3947
3937 /* Enable PLL sequencer (use read-modify-write to set bit 13) */ 3948 return rc;
3938 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3939 val |= (1<<13);
3940 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3941} 3949}
3942 3950
3943/******************************************************************/ 3951/******************************************************************/
@@ -4098,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4098 4106
4099 bnx2x_8073_set_pause_cl37(params, phy, vars); 4107 bnx2x_8073_set_pause_cl37(params, phy, vars);
4100 4108
4101 bnx2x_8073_set_xaui_low_power_mode(bp, phy);
4102
4103 bnx2x_cl45_read(bp, phy, 4109 bnx2x_cl45_read(bp, phy,
4104 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 4110 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4105 4111
@@ -4108,6 +4114,25 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4108 4114
4109 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 4115 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4110 4116
4117 /**
4118 * If this is forced speed, set to KR or KX (all other are not
4119 * supported)
4120 */
4121 /* Swap polarity if required - Must be done only in non-1G mode */
4122 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4123 /* Configure the 8073 to swap _P and _N of the KR lines */
4124 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
4125 /* 10G Rx/Tx and 1G Tx signal polarity swap */
4126 bnx2x_cl45_read(bp, phy,
4127 MDIO_PMA_DEVAD,
4128 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
4129 bnx2x_cl45_write(bp, phy,
4130 MDIO_PMA_DEVAD,
4131 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
4132 (val | (3<<9)));
4133 }
4134
4135
4111 /* Enable CL37 BAM */ 4136 /* Enable CL37 BAM */
4112 if (REG_RD(bp, params->shmem_base + 4137 if (REG_RD(bp, params->shmem_base +
4113 offsetof(struct shmem_region, dev_info. 4138 offsetof(struct shmem_region, dev_info.
@@ -4314,8 +4339,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4314 } 4339 }
4315 4340
4316 if (link_up) { 4341 if (link_up) {
4342 /* Swap polarity if required */
4343 if (params->lane_config &
4344 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4345 /* Configure the 8073 to swap P and N of the KR lines */
4346 bnx2x_cl45_read(bp, phy,
4347 MDIO_XS_DEVAD,
4348 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
4349 /**
4350 * Set bit 3 to invert Rx in 1G mode and clear this bit
4351 * when it`s in 10G mode.
4352 */
4353 if (vars->line_speed == SPEED_1000) {
4354 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4355 "the 8073\n");
4356 val1 |= (1<<3);
4357 } else
4358 val1 &= ~(1<<3);
4359
4360 bnx2x_cl45_write(bp, phy,
4361 MDIO_XS_DEVAD,
4362 MDIO_XS_REG_8073_RX_CTRL_PCIE,
4363 val1);
4364 }
4317 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 4365 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
4318 bnx2x_8073_resolve_fc(phy, params, vars); 4366 bnx2x_8073_resolve_fc(phy, params, vars);
4367 vars->duplex = DUPLEX_FULL;
4319 } 4368 }
4320 return link_up; 4369 return link_up;
4321} 4370}
@@ -5062,6 +5111,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
5062 else 5111 else
5063 vars->line_speed = SPEED_10000; 5112 vars->line_speed = SPEED_10000;
5064 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5113 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5114 vars->duplex = DUPLEX_FULL;
5065 } 5115 }
5066 return link_up; 5116 return link_up;
5067} 5117}
@@ -5758,8 +5808,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5758 DP(NETIF_MSG_LINK, "port %x: External link is down\n", 5808 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
5759 params->port); 5809 params->port);
5760 } 5810 }
5761 if (link_up) 5811 if (link_up) {
5762 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5812 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5813 vars->duplex = DUPLEX_FULL;
5814 DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
5815 }
5763 5816
5764 if ((DUAL_MEDIA(params)) && 5817 if ((DUAL_MEDIA(params)) &&
5765 (phy->req_line_speed == SPEED_1000)) { 5818 (phy->req_line_speed == SPEED_1000)) {
@@ -5875,10 +5928,26 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
5875 MDIO_PMA_REG_8481_LED2_MASK, 5928 MDIO_PMA_REG_8481_LED2_MASK,
5876 0x18); 5929 0x18);
5877 5930
5931 /* Select activity source by Tx and Rx, as suggested by PHY AE */
5878 bnx2x_cl45_write(bp, phy, 5932 bnx2x_cl45_write(bp, phy,
5879 MDIO_PMA_DEVAD, 5933 MDIO_PMA_DEVAD,
5880 MDIO_PMA_REG_8481_LED3_MASK, 5934 MDIO_PMA_REG_8481_LED3_MASK,
5881 0x0040); 5935 0x0006);
5936
5937 /* Select the closest activity blink rate to that in 10/100/1000 */
5938 bnx2x_cl45_write(bp, phy,
5939 MDIO_PMA_DEVAD,
5940 MDIO_PMA_REG_8481_LED3_BLINK,
5941 0);
5942
5943 bnx2x_cl45_read(bp, phy,
5944 MDIO_PMA_DEVAD,
5945 MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
5946 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
5947
5948 bnx2x_cl45_write(bp, phy,
5949 MDIO_PMA_DEVAD,
5950 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
5882 5951
5883 /* 'Interrupt Mask' */ 5952 /* 'Interrupt Mask' */
5884 bnx2x_cl45_write(bp, phy, 5953 bnx2x_cl45_write(bp, phy,
@@ -6126,6 +6195,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6126 /* Check link 10G */ 6195 /* Check link 10G */
6127 if (val2 & (1<<11)) { 6196 if (val2 & (1<<11)) {
6128 vars->line_speed = SPEED_10000; 6197 vars->line_speed = SPEED_10000;
6198 vars->duplex = DUPLEX_FULL;
6129 link_up = 1; 6199 link_up = 1;
6130 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6200 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
6131 } else { /* Check Legacy speed link */ 6201 } else { /* Check Legacy speed link */
@@ -6405,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6405 MDIO_PMA_DEVAD, 6475 MDIO_PMA_DEVAD,
6406 MDIO_PMA_REG_8481_LED1_MASK, 6476 MDIO_PMA_REG_8481_LED1_MASK,
6407 0x80); 6477 0x80);
6478
6479 /* Tell LED3 to blink on source */
6480 bnx2x_cl45_read(bp, phy,
6481 MDIO_PMA_DEVAD,
6482 MDIO_PMA_REG_8481_LINK_SIGNAL,
6483 &val);
6484 val &= ~(7<<6);
6485 val |= (1<<6); /* A83B[8:6]= 1 */
6486 bnx2x_cl45_write(bp, phy,
6487 MDIO_PMA_DEVAD,
6488 MDIO_PMA_REG_8481_LINK_SIGNAL,
6489 val);
6408 } 6490 }
6409 break; 6491 break;
6410 } 6492 }
@@ -6489,6 +6571,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
6489 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 6571 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
6490 &val2); 6572 &val2);
6491 vars->line_speed = SPEED_10000; 6573 vars->line_speed = SPEED_10000;
6574 vars->duplex = DUPLEX_FULL;
6492 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", 6575 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
6493 val2, (val2 & (1<<14))); 6576 val2, (val2 & (1<<14)));
6494 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6577 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -7605,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7605 struct bnx2x_phy phy[PORT_MAX]; 7688 struct bnx2x_phy phy[PORT_MAX];
7606 struct bnx2x_phy *phy_blk[PORT_MAX]; 7689 struct bnx2x_phy *phy_blk[PORT_MAX];
7607 u16 val; 7690 u16 val;
7608 s8 port; 7691 s8 port = 0;
7609 s8 port_of_path = 0; 7692 s8 port_of_path = 0;
7610 7693 u32 swap_val, swap_override;
7611 bnx2x_ext_phy_hw_reset(bp, 0); 7694 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7695 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7696 port ^= (swap_val && swap_override);
7697 bnx2x_ext_phy_hw_reset(bp, port);
7612 /* PART1 - Reset both phys */ 7698 /* PART1 - Reset both phys */
7613 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7699 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7614 u32 shmem_base, shmem2_base; 7700 u32 shmem_base, shmem2_base;
@@ -7663,7 +7749,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7663 7749
7664 /* PART2 - Download firmware to both phys */ 7750 /* PART2 - Download firmware to both phys */
7665 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7751 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7666 u16 fw_ver1;
7667 if (CHIP_IS_E2(bp)) 7752 if (CHIP_IS_E2(bp))
7668 port_of_path = 0; 7753 port_of_path = 0;
7669 else 7754 else
@@ -7671,19 +7756,9 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7671 7756
7672 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7757 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7673 phy_blk[port]->addr); 7758 phy_blk[port]->addr);
7674 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7759 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7675 port_of_path); 7760 port_of_path))
7676
7677 bnx2x_cl45_read(bp, phy_blk[port],
7678 MDIO_PMA_DEVAD,
7679 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7680 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7681 DP(NETIF_MSG_LINK,
7682 "bnx2x_8073_common_init_phy port %x:"
7683 "Download failed. fw version = 0x%x\n",
7684 port, fw_ver1);
7685 return -EINVAL; 7761 return -EINVAL;
7686 }
7687 7762
7688 /* Only set bit 10 = 1 (Tx power down) */ 7763 /* Only set bit 10 = 1 (Tx power down) */
7689 bnx2x_cl45_read(bp, phy_blk[port], 7764 bnx2x_cl45_read(bp, phy_blk[port],
@@ -7848,27 +7923,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7848 } 7923 }
7849 /* PART2 - Download firmware to both phys */ 7924 /* PART2 - Download firmware to both phys */
7850 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7925 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7851 u16 fw_ver1;
7852 if (CHIP_IS_E2(bp)) 7926 if (CHIP_IS_E2(bp))
7853 port_of_path = 0; 7927 port_of_path = 0;
7854 else 7928 else
7855 port_of_path = port; 7929 port_of_path = port;
7856 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7930 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7857 phy_blk[port]->addr); 7931 phy_blk[port]->addr);
7858 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7932 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7859 port_of_path); 7933 port_of_path))
7860 bnx2x_cl45_read(bp, phy_blk[port],
7861 MDIO_PMA_DEVAD,
7862 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7863 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7864 DP(NETIF_MSG_LINK,
7865 "bnx2x_8727_common_init_phy port %x:"
7866 "Download failed. fw version = 0x%x\n",
7867 port, fw_ver1);
7868 return -EINVAL; 7934 return -EINVAL;
7869 }
7870 }
7871 7935
7936 }
7872 return 0; 7937 return 0;
7873} 7938}
7874 7939
@@ -7916,6 +7981,7 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7916 u32 shmem2_base_path[], u32 chip_id) 7981 u32 shmem2_base_path[], u32 chip_id)
7917{ 7982{
7918 u8 rc = 0; 7983 u8 rc = 0;
7984 u32 phy_ver;
7919 u8 phy_index; 7985 u8 phy_index;
7920 u32 ext_phy_type, ext_phy_config; 7986 u32 ext_phy_type, ext_phy_config;
7921 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 7987 DP(NETIF_MSG_LINK, "Begin common phy init\n");
@@ -7923,6 +7989,16 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7923 if (CHIP_REV_IS_EMUL(bp)) 7989 if (CHIP_REV_IS_EMUL(bp))
7924 return 0; 7990 return 0;
7925 7991
7992 /* Check if common init was already done */
7993 phy_ver = REG_RD(bp, shmem_base_path[0] +
7994 offsetof(struct shmem_region,
7995 port_mb[PORT_0].ext_phy_fw_version));
7996 if (phy_ver) {
7997 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
7998 phy_ver);
7999 return 0;
8000 }
8001
7926 /* Read the ext_phy_type for arbitrary port(0) */ 8002 /* Read the ext_phy_type for arbitrary port(0) */
7927 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 8003 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
7928 phy_index++) { 8004 phy_index++) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1e..d584d32c747d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 		/* accept matched ucast */
 		drop_all_ucast = 0;
 	}
-	if (filters & BNX2X_ACCEPT_MULTICAST) {
+	if (filters & BNX2X_ACCEPT_MULTICAST)
 		/* accept matched mcast */
 		drop_all_mcast = 0;
-		if (IS_MF_SI(bp))
-			/* since mcast addresses won't arrive with ovlan,
-			 * fw needs to accept all of them in
-			 * switch-independent mode */
-			accp_all_mcast = 1;
-	}
+
 	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 		/* accept all mcast */
 		drop_all_ucast = 0;
@@ -4281,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 			BNX2X_ACCEPT_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST |
+						  BNX2X_ACCEPT_MULTICAST);
+		}
 #endif
 		break;
 
@@ -4291,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 			BNX2X_ACCEPT_ALL_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 * Prevent duplication of multicast packets by configuring FCoE
+		 * L2 Client to receive only matched unicast frames.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST);
+		}
 #endif
 		break;
 
 	case BNX2X_RX_MODE_PROMISC:
 		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 * Prevent packets duplication by configuring DROP_ALL for FCoE
+		 * L2 Client.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+		}
 #endif
 		/* pass management unicast packets as well */
 		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5305,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
 		}
 	}
 
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-					bp->common.shmem_base,
-					bp->common.shmem2_base);
-
 	bnx2x_setup_fan_failure_detection(bp);
 
 	/* clear PXP2 attentions */
@@ -5503,9 +5508,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
 	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-					bp->common.shmem_base,
-					bp->common.shmem2_base);
 	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
 				      bp->common.shmem2_base, port)) {
 		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -8379,6 +8381,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 		bp->mdio.prtad =
 			XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+	/*
+	 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
+	 * In MF mode, it is set to cover self test cases
+	 */
+	if (IS_MF(bp))
+		bp->port.need_hw_lock = 1;
+	else
+		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+					bp->common.shmem_base,
+					bp->common.shmem2_base);
 }
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d61..e01330bb36c7 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6194,7 +6194,11 @@ The other bits are reserved and should be zero*/
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G		0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
 
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
 
 #define IGU_FUNC_BASE			0x0400
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e2bb39..1024ae158227 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c65129..5c6fba802f2b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
 		goto out;
 	}
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf1..163e0b06eaa5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 	if (!slave || !slave_do_arp_validate(bond, slave))
 		goto out_unlock;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out_unlock;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
 		goto out_unlock;
 
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade9..5dec456fd4a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
23 23
24 As only the sending and receiving of CAN frames is implemented, this 24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from: 25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de 26 www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
27 27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach, 28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see 29 slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -117,6 +117,8 @@ source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/usb/Kconfig"
 
+source "drivers/net/can/softing/Kconfig"
+
 config CAN_DEBUG_DEVICES
 	bool "CAN devices debugging messages"
 	depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f9..53c82a71778e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
 can-dev-y := dev.o
 
 obj-y				+= usb/
+obj-y				+= softing/
 
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -40,22 +41,23 @@
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT	12
+#define AT91_NAPI_WEIGHT	11
 
 /*
  * RX/TX Mailbox split
  * don't dare to touch
  */
-#define AT91_MB_RX_NUM		12
+#define AT91_MB_RX_NUM		11
 #define AT91_MB_TX_SHIFT	2
 
-#define AT91_MB_RX_FIRST	0
+#define AT91_MB_RX_FIRST	1
 #define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
 
 #define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
 #define AT91_MB_RX_SPLIT	8
 #define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+				 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
 
 #define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
 #define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
 
 	struct clk *clk;
 	struct at91_can_data *pdata;
+
+	canid_t mb0_id;
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+	u32 reg_mid;
+
+	if (can_id & CAN_EFF_FLAG)
+		reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+	else
+		reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+	return reg_mid;
+}
+
 /*
  * Swtich transceiver on or off
  */
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
 {
 	struct at91_priv *priv = netdev_priv(dev);
 	unsigned int i;
+	u32 reg_mid;
 
 	/*
-	 * The first 12 mailboxes are used as a reception FIFO. The
-	 * last mailbox is configured with overwrite option. The
-	 * overwrite flag indicates a FIFO overflow.
+	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+	 * mailbox is disabled. The next 11 mailboxes are used as a
+	 * reception FIFO. The last mailbox is configured with
+	 * overwrite option. The overwrite flag indicates a FIFO
+	 * overflow.
 	 */
+	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+		at91_write(priv, AT91_MID(i), reg_mid);
+		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
+	}
+
 	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
 	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
-	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+	priv->tx_next = priv->tx_echo = 0;
+	priv->rx_next = AT91_MB_RX_FIRST;
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
-
-	if (cf->can_id & CAN_EFF_FLAG)
-		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
-	else
-		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
 	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
 		(cf->can_dlc << 16) | AT91_MCR_MTCR;
 
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
  *
  * Theory of Operation:
  *
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. we split
+ * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
  *
  * Like it or not, but the chip always saves a received CAN message
  * into the first free mailbox it finds (starting with the
  * lowest). This makes it very difficult to read the messages in the
  * right order from the chip. This is how we work around that problem:
  *
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
  * rx ints are disabled in the interrupt handler and a napi poll is
  * scheduled. We read the mailbox, but do _not_ reenable the mb (to
  * receive another message).
  *
  *   lower mbxs      upper
- *   ______^______    __^__
+ *     ____^______    __^__
  *  /             \ /     \
  * +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
  * +-+-+-+-+-+-+-+-++-+-+-+-+
  *  0 0 0 0 0 0 0 0 0 0 1 1  \ mail
  *  0 1 2 3 4 5 6 7 8 9 0 1  / box
+ *  ^
+ *  |
+ *   \
+ *     unused, due to chip bug
  *
  * The variable priv->rx_next points to the next mailbox to read a
  * message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
590 "order of incoming frames cannot be guaranteed\n"); 616 "order of incoming frames cannot be guaranteed\n");
591 617
592 again: 618 again:
593 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 619 for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
594 mb < AT91_MB_RX_NUM && quota > 0; 620 mb < AT91_MB_RX_LAST + 1 && quota > 0;
595 reg_sr = at91_read(priv, AT91_SR), 621 reg_sr = at91_read(priv, AT91_SR),
596 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { 622 mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
597 at91_read_msg(dev, mb); 623 at91_read_msg(dev, mb);
598 624
599 /* reactivate mailboxes */ 625 /* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    quota > 0 && mb >= AT91_MB_RX_NUM) {
-		priv->rx_next = 0;
+	    quota > 0 && mb > AT91_MB_RX_LAST) {
+		priv->rx_next = AT91_MB_RX_FIRST;
 		goto again;
 	}
 
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
 	.ndo_start_xmit	= at91_start_xmit,
 };
 
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+	if (priv->mb0_id & CAN_EFF_FLAG)
+		return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+	else
+		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct at91_priv *priv = netdev_priv(ndev);
+	unsigned long can_id;
+	ssize_t ret;
+	int err;
+
+	rtnl_lock();
+
+	if (ndev->flags & IFF_UP) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	err = strict_strtoul(buf, 0, &can_id);
+	if (err) {
+		ret = err;
+		goto out;
+	}
+
+	if (can_id & CAN_EFF_FLAG)
+		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+	else
+		can_id &= CAN_SFF_MASK;
+
+	priv->mb0_id = can_id;
+	ret = count;
+
+ out:
+	rtnl_unlock();
+	return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+	&dev_attr_mb0_id.attr,
+	NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+	.attrs = at91_sysfs_attrs,
+};
+
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 	dev->netdev_ops	= &at91_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
+	dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 	priv->dev = dev;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
+	priv->mb0_id = 0x7ff;
 
 	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
 
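
The new at91_can_id_to_reg_mid() helper and the mb0_id default of 0x7ff above encode CAN identifiers into the mailbox MID register: an 11-bit standard ID is shifted into bits 28..18, while a 29-bit extended ID keeps bits 28..0 and sets the MIDE flag. A small user-space sketch of that encoding (illustration only; the CAN_* constants are the usual linux/can.h values, and AT91_MID_MIDE is assumed to be bit 29 as in the driver):

#include <stdint.h>
#include <stdio.h>

#define CAN_EFF_FLAG	0x80000000U	/* extended frame format flag */
#define CAN_SFF_MASK	0x000007ffU	/* 11-bit standard ID */
#define CAN_EFF_MASK	0x1fffffffU	/* 29-bit extended ID */
#define AT91_MID_MIDE	(1u << 29)	/* MID: extended-ID enable (assumed) */

static uint32_t can_id_to_reg_mid(uint32_t can_id)
{
	if (can_id & CAN_EFF_FLAG)
		return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	return (can_id & CAN_SFF_MASK) << 18;	/* standard ID in bits 28..18 */
}

int main(void)
{
	/* the default mb0_id (0x7ff) set in at91_can_probe() */
	printf("0x%08x\n", can_id_to_reg_mid(0x7ff));	/* prints 0x1ffc0000 */
	return 0;
}
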
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a739..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
 	return count;
 }
 
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
 		   ican3_sysfs_set_term);
 
 static struct attribute *ican3_sysfs_attrs[] = {
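
The one-line change above tightens the permission bits of the "termination" sysfs attribute: S_IWUGO | S_IRUGO is mode 0666 (any user may write), while S_IWUSR | S_IRUGO is 0644 (only the owner, i.e. root, may write). A tiny host-side check of those octal values (illustration only; user space has no S_IWUGO macro, so it is spelled out):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned int old_mode = S_IRUSR | S_IRGRP | S_IROTH |	/* S_IRUGO */
				S_IWUSR | S_IWGRP | S_IWOTH;	/* S_IWUGO */
	unsigned int new_mode = S_IRUSR | S_IRGRP | S_IROTH |	/* S_IRUGO */
				S_IWUSR;
	printf("%04o -> %04o\n", old_mode, new_mode);	/* 0666 -> 0644 */
	return 0;
}
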
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
 
 static struct can_bittiming_const pch_can_bittiming_const = {
 	.name = KBUILD_MODNAME,
-	.tseg1_min = 1,
+	.tseg1_min = 2,
 	.tseg1_max = 16,
 	.tseg2_min = 1,
 	.tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
 	struct pch_can_priv *priv = netdev_priv(ndev);
 
 	unregister_candev(priv->ndev);
-	pci_iounmap(pdev, priv->regs);
 	if (priv->use_msi)
 		pci_disable_msi(priv->dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pch_can_reset(priv);
+	pci_iounmap(pdev, priv->regs);
 	free_candev(priv->ndev);
 }
 
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
 		priv->use_msi = 0;
 	} else {
 		netdev_err(ndev, "PCH CAN opened with MSI\n");
+		pci_set_master(pdev);
 		priv->use_msi = 1;
 	}
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..8ba81b3ddd90
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
1config CAN_SOFTING
2 tristate "Softing Gmbh CAN generic support"
3 depends on CAN_DEV && HAS_IOMEM
4 ---help---
5 Support for CAN cards from Softing Gmbh & some cards
6 from Vector Gmbh.
7 Softing Gmbh CAN cards come with 1 or 2 physical busses.
8 Those cards typically use Dual Port RAM to communicate
9 with the host CPU. The interface is then identical for PCI
10 and PCMCIA cards. This driver operates on a platform device,
11 which has been created by softing_cs or softing_pci driver.
12 Warning:
13 The API of the card does not allow fine control per bus, but
14 controls the 2 busses on the card together.
15 As such, some actions (start/stop/busoff recovery) on 1 bus
16 must bring down the other bus too temporarily.
17
18config CAN_SOFTING_CS
19 tristate "Softing Gmbh CAN pcmcia cards"
20 depends on PCMCIA
21 select CAN_SOFTING
22 ---help---
23 Support for PCMCIA cards from Softing Gmbh & some cards
24 from Vector Gmbh.
25 You need firmware for these, which you can get at
26 http://developer.berlios.de/projects/socketcan/
27 This version of the driver is written against
28 firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
29 In order to use the card as CAN device, you need the Softing generic
30 support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
1
2softing-y := softing_main.o softing_fw.o
3obj-$(CONFIG_CAN_SOFTING) += softing.o
4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
5
6ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
1/*
2 * softing common interfaces
3 *
4 * by Kurt Van Dijck, 2008-2010
5 */
6
7#include <linux/atomic.h>
8#include <linux/netdevice.h>
9#include <linux/ktime.h>
10#include <linux/mutex.h>
11#include <linux/spinlock.h>
12#include <linux/can.h>
13#include <linux/can/dev.h>
14
15#include "softing_platform.h"
16
17struct softing;
18
19struct softing_priv {
20 struct can_priv can; /* must be the first member! */
21 struct net_device *netdev;
22 struct softing *card;
23 struct {
24 int pending;
25 /* variables wich hold the circular buffer */
26 int echo_put;
27 int echo_get;
28 } tx;
29 struct can_bittiming_const btr_const;
30 int index;
31 uint8_t output;
32 uint16_t chip;
33};
34#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
35
36struct softing {
37 const struct softing_platform_data *pdat;
38 struct platform_device *pdev;
39 struct net_device *net[2];
40 spinlock_t spin; /* protect this structure & DPRAM access */
41 ktime_t ts_ref;
42 ktime_t ts_overflow; /* timestamp overflow value, in ktime */
43
44 struct {
45 /* indication of firmware status */
46 int up;
47 /* protection of the 'up' variable */
48 struct mutex lock;
49 } fw;
50 struct {
51 int nr;
52 int requested;
53 int svc_count;
54 unsigned int dpram_position;
55 } irq;
56 struct {
57 int pending;
58 int last_bus;
59 /*
60 * keep the bus that last tx'd a message,
61 * in order to let every netdev queue resume
62 */
63 } tx;
64 __iomem uint8_t *dpram;
65 unsigned long dpram_phys;
66 unsigned long dpram_size;
67 struct {
68 uint16_t fw_version, hw_version, license, serial;
69 uint16_t chip[2];
70 unsigned int freq; /* remote cpu's operating frequency */
71 } id;
72};
73
74extern int softing_default_output(struct net_device *netdev);
75
76extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
77
78extern int softing_chip_poweron(struct softing *card);
79
80extern int softing_bootloader_command(struct softing *card, int16_t cmd,
81 const char *msg);
82
83/* Load firmware after reset */
84extern int softing_load_fw(const char *file, struct softing *card,
85 __iomem uint8_t *virt, unsigned int size, int offset);
86
87/* Load final application firmware after bootloader */
88extern int softing_load_app_fw(const char *file, struct softing *card);
89
90/*
91 * enable or disable irq
92 * only called with fw.lock locked
93 */
94extern int softing_enable_irq(struct softing *card, int enable);
95
96/* start/stop 1 bus on card */
97extern int softing_startstop(struct net_device *netdev, int up);
98
99/* netif_rx() */
100extern int softing_netdev_rx(struct net_device *netdev,
101 const struct can_frame *msg, ktime_t ktime);
102
103/* SOFTING DPRAM mappings */
104#define DPRAM_RX 0x0000
105 #define DPRAM_RX_SIZE 32
106 #define DPRAM_RX_CNT 16
107#define DPRAM_RX_RD 0x0201 /* uint8_t */
108#define DPRAM_RX_WR 0x0205 /* uint8_t */
109#define DPRAM_RX_LOST 0x0207 /* uint8_t */
110
111#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
112#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
113#define DPRAM_FCT_HOST 0x032b /* uint16_t */
114
115#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
116#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
117#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
118#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
119#define DPRAM_RESET 0x0341 /* uint16_t */
120#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
121#define DPRAM_RESET_TIME 0x034d /* uint16_t */
122#define DPRAM_TIME 0x0350 /* uint64_t */
123#define DPRAM_WR_START 0x0358 /* uint8_t */
124#define DPRAM_WR_END 0x0359 /* uint8_t */
125#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
126#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
127#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
128#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
129#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
130
131#define DPRAM_TX 0x0400 /* uint16_t */
132 #define DPRAM_TX_SIZE 16
133 #define DPRAM_TX_CNT 32
134#define DPRAM_TX_RD 0x0601 /* uint8_t */
135#define DPRAM_TX_WR 0x0605 /* uint8_t */
136
137#define DPRAM_COMMAND 0x07e0 /* uint16_t */
138#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
139#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
140#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
141
142#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
143#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
144
145#define TXMAX (DPRAM_TX_CNT - 1)
146
147/* DPRAM return codes */
148#define RES_NONE 0
149#define RES_OK 1
150#define RES_NOK 2
151#define RES_UNKNOWN 3
152/* DPRAM flags */
153#define CMD_TX 0x01
154#define CMD_ACK 0x02
155#define CMD_XTD 0x04
156#define CMD_RTR 0x08
157#define CMD_ERR 0x10
158#define CMD_BUS2 0x80
159
160/* returned fifo entry bus state masks */
161#define SF_MASK_BUSOFF 0x80
162#define SF_MASK_EPASSIVE 0x60
163
164/* bus states */
165#define STATE_BUSOFF 2
166#define STATE_EPASSIVE 1
167#define STATE_EACTIVE 0
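
The DPRAM map above defines two ring buffers of fixed-size slots (DPRAM_RX_CNT = 16 receive slots of 32 bytes, DPRAM_TX_CNT = 32 transmit slots of 16 bytes) whose read and write indices also live in DPRAM. As softing_main.c below shows, the transmit side treats "write index equals read index" as full, so only DPRAM_TX_CNT - 1 slots are usable at a time, which is exactly the TXMAX definition. A minimal sketch of that index convention (host-side illustration only, not driver code):

#include <stdint.h>

#define DPRAM_TX_CNT	32
#define TXMAX		(DPRAM_TX_CNT - 1)	/* one slot sacrificed */

/* the card reports "no room left" when the write index has caught up */
static int tx_fifo_full(uint8_t fifo_wr, uint8_t fifo_rd)
{
	return fifo_wr == fifo_rd;
}

/* indices wrap at the slot count, as in softing_netdev_start_xmit() */
static uint8_t tx_fifo_advance(uint8_t idx)
{
	return (idx + 1 >= DPRAM_TX_CNT) ? 0 : (uint8_t)(idx + 1);
}
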
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..c11bb4de8630
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26
27#include "softing_platform.h"
28
29static int softingcs_index;
30static spinlock_t softingcs_index_lock;
31
32static int softingcs_reset(struct platform_device *pdev, int v);
33static int softingcs_enable_irq(struct platform_device *pdev, int v);
34
35/*
36 * platform_data descriptions
37 */
38#define MHZ (1000*1000)
39static const struct softing_platform_data softingcs_platform_data[] = {
40{
41 .name = "CANcard",
42 .manf = 0x0168, .prod = 0x001,
43 .generation = 1,
44 .nbus = 2,
45 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
46 .dpram_size = 0x0800,
47 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
48 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
49 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
50 .reset = softingcs_reset,
51 .enable_irq = softingcs_enable_irq,
52}, {
53 .name = "CANcard-NEC",
54 .manf = 0x0168, .prod = 0x002,
55 .generation = 1,
56 .nbus = 2,
57 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
58 .dpram_size = 0x0800,
59 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
60 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
61 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
62 .reset = softingcs_reset,
63 .enable_irq = softingcs_enable_irq,
64}, {
65 .name = "CANcard-SJA",
66 .manf = 0x0168, .prod = 0x004,
67 .generation = 1,
68 .nbus = 2,
69 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
70 .dpram_size = 0x0800,
71 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
72 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
73 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
74 .reset = softingcs_reset,
75 .enable_irq = softingcs_enable_irq,
76}, {
77 .name = "CANcard-2",
78 .manf = 0x0168, .prod = 0x005,
79 .generation = 2,
80 .nbus = 2,
81 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
82 .dpram_size = 0x1000,
83 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
84 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
85 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
86 .reset = softingcs_reset,
87 .enable_irq = NULL,
88}, {
89 .name = "Vector-CANcard",
90 .manf = 0x0168, .prod = 0x081,
91 .generation = 1,
92 .nbus = 2,
93 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
94 .dpram_size = 0x0800,
95 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
96 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
97 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
98 .reset = softingcs_reset,
99 .enable_irq = softingcs_enable_irq,
100}, {
101 .name = "Vector-CANcard-SJA",
102 .manf = 0x0168, .prod = 0x084,
103 .generation = 1,
104 .nbus = 2,
105 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
106 .dpram_size = 0x0800,
107 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
108 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
109 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
110 .reset = softingcs_reset,
111 .enable_irq = softingcs_enable_irq,
112}, {
113 .name = "Vector-CANcard-2",
114 .manf = 0x0168, .prod = 0x085,
115 .generation = 2,
116 .nbus = 2,
117 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
118 .dpram_size = 0x1000,
119 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
120 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
121 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
122 .reset = softingcs_reset,
123 .enable_irq = NULL,
124}, {
125 .name = "EDICcard-NEC",
126 .manf = 0x0168, .prod = 0x102,
127 .generation = 1,
128 .nbus = 2,
129 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
130 .dpram_size = 0x0800,
131 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
132 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
133 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
134 .reset = softingcs_reset,
135 .enable_irq = softingcs_enable_irq,
136}, {
137 .name = "EDICcard-2",
138 .manf = 0x0168, .prod = 0x105,
139 .generation = 2,
140 .nbus = 2,
141 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
142 .dpram_size = 0x1000,
143 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
144 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
145 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
146 .reset = softingcs_reset,
147 .enable_irq = NULL,
148}, {
149 0, 0,
150},
151};
152
153MODULE_FIRMWARE(fw_dir "bcard.bin");
154MODULE_FIRMWARE(fw_dir "ldcard.bin");
155MODULE_FIRMWARE(fw_dir "cancard.bin");
156MODULE_FIRMWARE(fw_dir "cansja.bin");
157
158MODULE_FIRMWARE(fw_dir "bcard2.bin");
159MODULE_FIRMWARE(fw_dir "ldcard2.bin");
160MODULE_FIRMWARE(fw_dir "cancrd2.bin");
161
162static __devinit const struct softing_platform_data
163*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
164{
165 const struct softing_platform_data *lp;
166
167 for (lp = softingcs_platform_data; lp->manf; ++lp) {
168 if ((lp->manf == manf) && (lp->prod == prod))
169 return lp;
170 }
171 return NULL;
172}
173
174/*
175 * platformdata callbacks
176 */
177static int softingcs_reset(struct platform_device *pdev, int v)
178{
179 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
180
181 dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
182 return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
183}
184
185static int softingcs_enable_irq(struct platform_device *pdev, int v)
186{
187 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
188
189 dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
190 return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
191}
192
193/*
194 * pcmcia check
195 */
196static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
197 void *priv_data)
198{
199 struct softing_platform_data *pdat = priv_data;
200 struct resource *pres;
201 int memspeed = 0;
202
203 WARN_ON(!pdat);
204 pres = pcmcia->resource[PCMCIA_IOMEM_0];
205 if (resource_size(pres) < 0x1000)
206 return -ERANGE;
207
208 pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
209 if (pdat->generation < 2) {
210 pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
211 memspeed = 3;
212 } else {
213 pres->flags |= WIN_DATA_WIDTH_16;
214 }
215 return pcmcia_request_window(pcmcia, pres, memspeed);
216}
217
218static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
219{
220 struct platform_device *pdev = pcmcia->priv;
221
222 /* free bits */
223 platform_device_unregister(pdev);
224 /* release pcmcia stuff */
225 pcmcia_disable_device(pcmcia);
226}
227
228/*
229 * platform_device wrapper
230 * pdev->resource has 2 entries: io & irq
231 */
232static void softingcs_pdev_release(struct device *dev)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 kfree(pdev);
236}
237
238static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
239{
240 int ret;
241 struct platform_device *pdev;
242 const struct softing_platform_data *pdat;
243 struct resource *pres;
244 struct dev {
245 struct platform_device pdev;
246 struct resource res[2];
247 } *dev;
248
249 /* find matching platform_data */
250 pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
251 if (!pdat)
252 return -ENOTTY;
253
254 /* setup pcmcia device */
255 pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
256 CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
257 ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
258 if (ret)
259 goto pcmcia_failed;
260
261 ret = pcmcia_enable_device(pcmcia);
262 if (ret < 0)
263 goto pcmcia_failed;
264
265 pres = pcmcia->resource[PCMCIA_IOMEM_0];
266 if (!pres) {
267 ret = -EBADF;
268 goto pcmcia_bad;
269 }
270
271 /* create softing platform device */
272 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273 if (!dev) {
274 ret = -ENOMEM;
275 goto mem_failed;
276 }
277 dev->pdev.resource = dev->res;
278 dev->pdev.num_resources = ARRAY_SIZE(dev->res);
279 dev->pdev.dev.release = softingcs_pdev_release;
280
281 pdev = &dev->pdev;
282 pdev->dev.platform_data = (void *)pdat;
283 pdev->dev.parent = &pcmcia->dev;
284 pcmcia->priv = pdev;
285
286 /* platform device resources */
287 pdev->resource[0].flags = IORESOURCE_MEM;
288 pdev->resource[0].start = pres->start;
289 pdev->resource[0].end = pres->end;
290
291 pdev->resource[1].flags = IORESOURCE_IRQ;
292 pdev->resource[1].start = pcmcia->irq;
293 pdev->resource[1].end = pdev->resource[1].start;
294
295 /* platform device setup */
296 spin_lock(&softingcs_index_lock);
297 pdev->id = softingcs_index++;
298 spin_unlock(&softingcs_index_lock);
299 pdev->name = "softing";
300 dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
301 ret = platform_device_register(pdev);
302 if (ret < 0)
303 goto platform_failed;
304
305 dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
306 return 0;
307
308platform_failed:
309 kfree(dev);
310mem_failed:
311pcmcia_bad:
312pcmcia_failed:
313 pcmcia_disable_device(pcmcia);
314 pcmcia->priv = NULL;
315 return ret ?: -ENODEV;
316}
317
318static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
319 /* softing */
320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
322 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
323 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
324 /* vector, manufacturer? */
325 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
326 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
327 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
328 /* EDIC */
329 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
330 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
331 PCMCIA_DEVICE_NULL,
332};
333
334MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
335
336static struct pcmcia_driver softingcs_driver = {
337 .owner = THIS_MODULE,
338 .name = "softingcs",
339 .id_table = softingcs_ids,
340 .probe = softingcs_probe,
341 .remove = __devexit_p(softingcs_remove),
342};
343
344static int __init softingcs_start(void)
345{
346 spin_lock_init(&softingcs_index_lock);
347 return pcmcia_register_driver(&softingcs_driver);
348}
349
350static void __exit softingcs_stop(void)
351{
352 pcmcia_unregister_driver(&softingcs_driver);
353}
354
355module_init(softingcs_start);
356module_exit(softingcs_stop);
357
358MODULE_DESCRIPTION("softing CANcard driver"
359 ", links PCMCIA card to softing driver");
360MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/firmware.h>
21#include <linux/sched.h>
22#include <asm/div64.h>
23
24#include "softing.h"
25
26/*
27 * low level DPRAM command.
28 * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
29 */
30static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
31 const char *msg)
32{
33 int ret;
34 unsigned long stamp;
35
36 iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
37 iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
38 iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
39 /* be sure to flush this to the card */
40 wmb();
41 stamp = jiffies + 1 * HZ;
42 /* wait for card */
43 do {
44 /* DPRAM_FCT_HOST is _not_ aligned */
45 ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
46 (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
47 /* don't have any cached variables */
48 rmb();
49 if (ret == RES_OK)
50 /* read return-value now */
51 return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
52
53 if ((ret != vector) || time_after(jiffies, stamp))
54 break;
55 /* process context => relax */
56 usleep_range(500, 10000);
57 } while (1);
58
59 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
60 dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
61 return ret;
62}
63
64static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
65{
66 int ret;
67
68 ret = _softing_fct_cmd(card, cmd, 0, msg);
69 if (ret > 0) {
70 dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
71 ret = -EIO;
72 }
73 return ret;
74}
75
76int softing_bootloader_command(struct softing *card, int16_t cmd,
77 const char *msg)
78{
79 int ret;
80 unsigned long stamp;
81
82 iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
83 iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
84 /* be sure to flush this to the card */
85 wmb();
86 stamp = jiffies + 3 * HZ;
87 /* wait for card */
88 do {
89 ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
90 /* don't have any cached variables */
91 rmb();
92 if (ret == RES_OK)
93 return 0;
94 if (time_after(jiffies, stamp))
95 break;
96 /* process context => relax */
97 usleep_range(500, 10000);
98 } while (!signal_pending(current));
99
100 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
101 dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
102 return ret;
103}
104
105static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
106 uint16_t *plen, const uint8_t **pdat)
107{
108 uint16_t checksum[2];
109 const uint8_t *mem;
110 const uint8_t *end;
111
112 /*
113 * firmware records are a binary, unaligned stream composed of:
114 * uint16_t type;
115 * uint32_t addr;
116 * uint16_t len;
117 * uint8_t dat[len];
118 * uint16_t checksum;
119 * all values in little endian.
120 * We could define a struct for this, with __attribute__((packed)),
121 * but would that solve the alignment in _all_ cases (cfr. the
122 * struct itself may be an odd address)?
123 *
124 * I chose to use leXX_to_cpup() since this solves both
125 * endianness & alignment.
126 */
127 mem = *pmem;
128 *ptype = le16_to_cpup((void *)&mem[0]);
129 *paddr = le32_to_cpup((void *)&mem[2]);
130 *plen = le16_to_cpup((void *)&mem[6]);
131 *pdat = &mem[8];
132 /* verify checksum */
133 end = &mem[8 + *plen];
134 checksum[0] = le16_to_cpup((void *)end);
135 for (checksum[1] = 0; mem < end; ++mem)
136 checksum[1] += *mem;
137 if (checksum[0] != checksum[1])
138 return -EINVAL;
139 /* increment */
140 *pmem += 10 + *plen;
141 return 0;
142}
143
144int softing_load_fw(const char *file, struct softing *card,
145 __iomem uint8_t *dpram, unsigned int size, int offset)
146{
147 const struct firmware *fw;
148 int ret;
149 const uint8_t *mem, *end, *dat;
150 uint16_t type, len;
151 uint32_t addr;
152 uint8_t *buf = NULL;
153 int buflen = 0;
154 int8_t type_end = 0;
155
156 ret = request_firmware(&fw, file, &card->pdev->dev);
157 if (ret < 0)
158 return ret;
159 dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
160 ", offset %c0x%04x\n",
161 card->pdat->name, file, (unsigned int)fw->size,
162 (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
163 /* parse the firmware */
164 mem = fw->data;
165 end = &mem[fw->size];
166 /* look for header record */
167 ret = fw_parse(&mem, &type, &addr, &len, &dat);
168 if (ret < 0)
169 goto failed;
170 if (type != 0xffff)
171 goto failed;
172 if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
173 ret = -EINVAL;
174 goto failed;
175 }
176 /* ok, we had a header */
177 while (mem < end) {
178 ret = fw_parse(&mem, &type, &addr, &len, &dat);
179 if (ret < 0)
180 goto failed;
181 if (type == 3) {
182 /* start address, not used here */
183 continue;
184 } else if (type == 1) {
185 /* eof */
186 type_end = 1;
187 break;
188 } else if (type != 0) {
189 ret = -EINVAL;
190 goto failed;
191 }
192
193 if ((addr + len + offset) > size)
194 goto failed;
195 memcpy_toio(&dpram[addr + offset], dat, len);
196 /* be sure to flush caches from IO space */
197 mb();
198 if (len > buflen) {
199 /* align buflen */
200 buflen = (len + (1024-1)) & ~(1024-1);
201 buf = krealloc(buf, buflen, GFP_KERNEL);
202 if (!buf) {
203 ret = -ENOMEM;
204 goto failed;
205 }
206 }
207 /* verify record data */
208 memcpy_fromio(buf, &dpram[addr + offset], len);
209 if (memcmp(buf, dat, len)) {
210 /* is not ok */
211 dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
212 ret = -EIO;
213 goto failed;
214 }
215 }
216 if (!type_end)
217 /* no end record seen */
218 goto failed;
219 ret = 0;
220failed:
221 kfree(buf);
222 release_firmware(fw);
223 if (ret < 0)
224 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
225 return ret;
226}
227
228int softing_load_app_fw(const char *file, struct softing *card)
229{
230 const struct firmware *fw;
231 const uint8_t *mem, *end, *dat;
232 int ret, j;
233 uint16_t type, len;
234 uint32_t addr, start_addr = 0;
235 unsigned int sum, rx_sum;
236 int8_t type_end = 0, type_entrypoint = 0;
237
238 ret = request_firmware(&fw, file, &card->pdev->dev);
239 if (ret) {
240 dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
241 file, ret);
242 return ret;
243 }
244 dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
245 file, (unsigned long)fw->size);
246 /* parse the firmware */
247 mem = fw->data;
248 end = &mem[fw->size];
249 /* look for header record */
250 ret = fw_parse(&mem, &type, &addr, &len, &dat);
251 if (ret)
252 goto failed;
253 ret = -EINVAL;
254 if (type != 0xffff) {
255 dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
256 type);
257 goto failed;
258 }
259 if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
260 dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
261 len, dat);
262 goto failed;
263 }
264 /* ok, we had a header */
265 while (mem < end) {
266 ret = fw_parse(&mem, &type, &addr, &len, &dat);
267 if (ret)
268 goto failed;
269
270 if (type == 3) {
271 /* start address */
272 start_addr = addr;
273 type_entrypoint = 1;
274 continue;
275 } else if (type == 1) {
276 /* eof */
277 type_end = 1;
278 break;
279 } else if (type != 0) {
280 dev_alert(&card->pdev->dev,
281 "unknown record type 0x%04x\n", type);
282 ret = -EINVAL;
283 goto failed;
284 }
285
286 /* regualar data */
287 for (sum = 0, j = 0; j < len; ++j)
288 sum += dat[j];
289 /* work in 16bit (target) */
290 sum &= 0xffff;
291
292 memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
293 iowrite32(card->pdat->app.offs + card->pdat->app.addr,
294 &card->dpram[DPRAM_COMMAND + 2]);
295 iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
296 iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
297 iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
298 ret = softing_bootloader_command(card, 1, "loading app.");
299 if (ret < 0)
300 goto failed;
301 /* verify checksum */
302 rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
303 if (rx_sum != sum) {
304 dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
305 ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
306 ret = -EIO;
307 goto failed;
308 }
309 }
310 if (!type_end || !type_entrypoint)
311 goto failed;
312 /* start application in card */
313 iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
314 iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
315 ret = softing_bootloader_command(card, 3, "start app.");
316 if (ret < 0)
317 goto failed;
318 ret = 0;
319failed:
320 release_firmware(fw);
321 if (ret < 0)
322 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
323 return ret;
324}
325
326static int softing_reset_chip(struct softing *card)
327{
328 int ret;
329
330 do {
331 /* reset chip */
332 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
333 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
334 iowrite8(1, &card->dpram[DPRAM_RESET]);
335 iowrite8(0, &card->dpram[DPRAM_RESET+1]);
336
337 ret = softing_fct_cmd(card, 0, "reset_can");
338 if (!ret)
339 break;
340 if (signal_pending(current))
341 /* don't wait any longer */
342 break;
343 } while (1);
344 card->tx.pending = 0;
345 return ret;
346}
347
348int softing_chip_poweron(struct softing *card)
349{
350 int ret;
351 /* sync */
352 ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
353 if (ret < 0)
354 goto failed;
355
356 ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
357 if (ret < 0)
358 goto failed;
359
360 ret = softing_reset_chip(card);
361 if (ret < 0)
362 goto failed;
363 /* get_serial */
364 ret = softing_fct_cmd(card, 43, "get_serial_number");
365 if (ret < 0)
366 goto failed;
367 card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
368 /* get_version */
369 ret = softing_fct_cmd(card, 12, "get_version");
370 if (ret < 0)
371 goto failed;
372 card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
373 card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
374 card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
375 card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
376 card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
377 return 0;
378failed:
379 return ret;
380}
381
382static void softing_initialize_timestamp(struct softing *card)
383{
384 uint64_t ovf;
385
386 card->ts_ref = ktime_get();
387
388 /* 16MHz is the reference */
389 ovf = 0x100000000ULL * 16;
390 do_div(ovf, card->pdat->freq ?: 16);
391
392 card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
393}
394
395ktime_t softing_raw2ktime(struct softing *card, u32 raw)
396{
397 uint64_t rawl;
398 ktime_t now, real_offset;
399 ktime_t target;
400 ktime_t tmp;
401
402 now = ktime_get();
403 real_offset = ktime_sub(ktime_get_real(), now);
404
405 /* find nsec from card */
406 rawl = raw * 16;
407 do_div(rawl, card->pdat->freq ?: 16);
408 target = ktime_add_us(card->ts_ref, rawl);
409 /* test for overflows */
410 tmp = ktime_add(target, card->ts_overflow);
411 while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
412 card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
413 target = tmp;
414 tmp = ktime_add(target, card->ts_overflow);
415 }
416 return ktime_add(target, real_offset);
417}
418
419static inline int softing_error_reporting(struct net_device *netdev)
420{
421 struct softing_priv *priv = netdev_priv(netdev);
422
423 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
424 ? 1 : 0;
425}
426
427int softing_startstop(struct net_device *dev, int up)
428{
429 int ret;
430 struct softing *card;
431 struct softing_priv *priv;
432 struct net_device *netdev;
433 int bus_bitmask_start;
434 int j, error_reporting;
435 struct can_frame msg;
436 const struct can_bittiming *bt;
437
438 priv = netdev_priv(dev);
439 card = priv->card;
440
441 if (!card->fw.up)
442 return -EIO;
443
444 ret = mutex_lock_interruptible(&card->fw.lock);
445 if (ret)
446 return ret;
447
448 bus_bitmask_start = 0;
449 if (dev && up)
450 /* prepare to start this bus as well */
451 bus_bitmask_start |= (1 << priv->index);
452 /* bring netdevs down */
453 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
454 netdev = card->net[j];
455 if (!netdev)
456 continue;
457 priv = netdev_priv(netdev);
458
459 if (dev != netdev)
460 netif_stop_queue(netdev);
461
462 if (netif_running(netdev)) {
463 if (dev != netdev)
464 bus_bitmask_start |= (1 << j);
465 priv->tx.pending = 0;
466 priv->tx.echo_put = 0;
467 priv->tx.echo_get = 0;
468 /*
469 * this bus' may just have called open_candev()
470 * which is rather stupid to call close_candev()
471 * already
472 * but we may come here from busoff recovery too
473 * in which case the echo_skb _needs_ flushing too.
474 * just be sure to call open_candev() again
475 */
476 close_candev(netdev);
477 }
478 priv->can.state = CAN_STATE_STOPPED;
479 }
480 card->tx.pending = 0;
481
482 softing_enable_irq(card, 0);
483 ret = softing_reset_chip(card);
484 if (ret)
485 goto failed;
486 if (!bus_bitmask_start)
487 /* no busses to be brought up */
488 goto card_done;
489
490 if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
491 && (softing_error_reporting(card->net[0])
492 != softing_error_reporting(card->net[1]))) {
493 dev_alert(&card->pdev->dev,
494 "err_reporting flag differs for busses\n");
495 goto invalid;
496 }
497 error_reporting = 0;
498 if (bus_bitmask_start & 1) {
499 netdev = card->net[0];
500 priv = netdev_priv(netdev);
501 error_reporting += softing_error_reporting(netdev);
502 /* init chip 1 */
503 bt = &priv->can.bittiming;
504 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
505 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
506 iowrite16(bt->phase_seg1 + bt->prop_seg,
507 &card->dpram[DPRAM_FCT_PARAM + 6]);
508 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
509 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
510 &card->dpram[DPRAM_FCT_PARAM + 10]);
511 ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
512 if (ret < 0)
513 goto failed;
514 /* set mode */
515 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
516 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
517 ret = softing_fct_cmd(card, 3, "set_mode[0]");
518 if (ret < 0)
519 goto failed;
520 /* set filter */
521 /* 11bit id & mask */
522 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
523 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
524 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
525 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
526 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
527 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
528 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
529 ret = softing_fct_cmd(card, 7, "set_filter[0]");
530 if (ret < 0)
531 goto failed;
532 /* set output control */
533 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
534 ret = softing_fct_cmd(card, 5, "set_output[0]");
535 if (ret < 0)
536 goto failed;
537 }
538 if (bus_bitmask_start & 2) {
539 netdev = card->net[1];
540 priv = netdev_priv(netdev);
541 error_reporting += softing_error_reporting(netdev);
542 /* init chip2 */
543 bt = &priv->can.bittiming;
544 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
545 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
546 iowrite16(bt->phase_seg1 + bt->prop_seg,
547 &card->dpram[DPRAM_FCT_PARAM + 6]);
548 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
549 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
550 &card->dpram[DPRAM_FCT_PARAM + 10]);
551 ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
552 if (ret < 0)
553 goto failed;
554 /* set mode2 */
555 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
556 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
557 ret = softing_fct_cmd(card, 4, "set_mode[1]");
558 if (ret < 0)
559 goto failed;
560 /* set filter2 */
561 /* 11bit id & mask */
562 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
563 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
564 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
565 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
566 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
567 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
568 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
569 ret = softing_fct_cmd(card, 8, "set_filter[1]");
570 if (ret < 0)
571 goto failed;
572 /* set output control2 */
573 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
574 ret = softing_fct_cmd(card, 6, "set_output[1]");
575 if (ret < 0)
576 goto failed;
577 }
578 /* enable_error_frame */
579 /*
580 * Error reporting is switched off at the moment since
581 * the receiving of them is not yet 100% verified
582 * This should be enabled sooner or later
583 *
584 if (error_reporting) {
585 ret = softing_fct_cmd(card, 51, "enable_error_frame");
586 if (ret < 0)
587 goto failed;
588 }
589 */
590 /* initialize interface */
591 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
592 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
593 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
594 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
595 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
596 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
597 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
598 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
599 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
600 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
601 ret = softing_fct_cmd(card, 17, "initialize_interface");
602 if (ret < 0)
603 goto failed;
604 /* enable_fifo */
605 ret = softing_fct_cmd(card, 36, "enable_fifo");
606 if (ret < 0)
607 goto failed;
608 /* enable fifo tx ack */
609 ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
610 if (ret < 0)
611 goto failed;
612 /* enable fifo tx ack2 */
613 ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
614 if (ret < 0)
615 goto failed;
616 /* start_chip */
617 ret = softing_fct_cmd(card, 11, "start_chip");
618 if (ret < 0)
619 goto failed;
620 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
621 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
622 if (card->pdat->generation < 2) {
623 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
624 /* flush the DPRAM caches */
625 wmb();
626 }
627
628 softing_initialize_timestamp(card);
629
630 /*
631 * do socketcan notifications/status changes
632 * from here, no errors should occur, or the failed: part
633 * must be reviewed
634 */
635 memset(&msg, 0, sizeof(msg));
636 msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
637 msg.can_dlc = CAN_ERR_DLC;
638 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
639 if (!(bus_bitmask_start & (1 << j)))
640 continue;
641 netdev = card->net[j];
642 if (!netdev)
643 continue;
644 priv = netdev_priv(netdev);
645 priv->can.state = CAN_STATE_ERROR_ACTIVE;
646 open_candev(netdev);
647 if (dev != netdev) {
648 /* notify other busses on the restart */
649 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
650 ++priv->can.can_stats.restarts;
651 }
652 netif_wake_queue(netdev);
653 }
654
655 /* enable interrupts */
656 ret = softing_enable_irq(card, 1);
657 if (ret)
658 goto failed;
659card_done:
660 mutex_unlock(&card->fw.lock);
661 return 0;
662invalid:
663 ret = -EINVAL;
664failed:
665 softing_enable_irq(card, 0);
666 softing_reset_chip(card);
667 mutex_unlock(&card->fw.lock);
668 /* bring all other interfaces down */
669 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
670 netdev = card->net[j];
671 if (!netdev)
672 continue;
673 dev_close(netdev);
674 }
675 return ret;
676}
677
678int softing_default_output(struct net_device *netdev)
679{
680 struct softing_priv *priv = netdev_priv(netdev);
681 struct softing *card = priv->card;
682
683 switch (priv->chip) {
684 case 1000:
685 return (card->pdat->generation < 2) ? 0xfb : 0xfa;
686 case 5:
687 return 0x60;
688 default:
689 return 0x40;
690 }
691}
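
softing_load_fw() and softing_load_app_fw() above both walk the firmware image with fw_parse(), which expects records of the form u16 type, u32 addr, u16 len, len data bytes, u16 checksum, all little endian, for a total of 10 + len bytes per record; the checksum is the 16-bit byte-sum of everything that precedes it. A short host-side sketch of that checksum rule (illustration only, not driver code):

#include <stdint.h>
#include <stddef.h>

/* 16-bit byte-sum over the 8-byte record header plus its payload */
static uint16_t record_checksum(const uint8_t *record, uint16_t data_len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < 8u + data_len; ++i)
		sum += record[i];
	return sum;	/* must match the trailing little-endian u16 */
}
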
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..5157e15e96eb
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,893 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/version.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24
25#include "softing.h"
26
27#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
28
29/*
30 * test is a specific CAN netdev
31 * is online (ie. up 'n running, not sleeping, not busoff
32 */
33static inline int canif_is_active(struct net_device *netdev)
34{
35 struct can_priv *can = netdev_priv(netdev);
36
37 if (!netif_running(netdev))
38 return 0;
39 return (can->state <= CAN_STATE_ERROR_PASSIVE);
40}
41
42/* reset DPRAM */
43static inline void softing_set_reset_dpram(struct softing *card)
44{
45 if (card->pdat->generation >= 2) {
46 spin_lock_bh(&card->spin);
47 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
48 &card->dpram[DPRAM_V2_RESET]);
49 spin_unlock_bh(&card->spin);
50 }
51}
52
53static inline void softing_clr_reset_dpram(struct softing *card)
54{
55 if (card->pdat->generation >= 2) {
56 spin_lock_bh(&card->spin);
57 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
58 &card->dpram[DPRAM_V2_RESET]);
59 spin_unlock_bh(&card->spin);
60 }
61}
62
63/* trigger the tx queue-ing */
64static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
65 struct net_device *dev)
66{
67 struct softing_priv *priv = netdev_priv(dev);
68 struct softing *card = priv->card;
69 int ret;
70 uint8_t *ptr;
71 uint8_t fifo_wr, fifo_rd;
72 struct can_frame *cf = (struct can_frame *)skb->data;
73 uint8_t buf[DPRAM_TX_SIZE];
74
75 if (can_dropped_invalid_skb(dev, skb))
76 return NETDEV_TX_OK;
77
78 spin_lock(&card->spin);
79
80 ret = NETDEV_TX_BUSY;
81 if (!card->fw.up ||
82 (card->tx.pending >= TXMAX) ||
83 (priv->tx.pending >= TX_ECHO_SKB_MAX))
84 goto xmit_done;
85 fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
86 fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
87 if (fifo_wr == fifo_rd)
88 /* fifo full */
89 goto xmit_done;
90 memset(buf, 0, sizeof(buf));
91 ptr = buf;
92 *ptr = CMD_TX;
93 if (cf->can_id & CAN_RTR_FLAG)
94 *ptr |= CMD_RTR;
95 if (cf->can_id & CAN_EFF_FLAG)
96 *ptr |= CMD_XTD;
97 if (priv->index)
98 *ptr |= CMD_BUS2;
99 ++ptr;
100 *ptr++ = cf->can_dlc;
101 *ptr++ = (cf->can_id >> 0);
102 *ptr++ = (cf->can_id >> 8);
103 if (cf->can_id & CAN_EFF_FLAG) {
104 *ptr++ = (cf->can_id >> 16);
105 *ptr++ = (cf->can_id >> 24);
106 } else {
107 /* increment 1, not 2 as you might think */
108 ptr += 1;
109 }
110 if (!(cf->can_id & CAN_RTR_FLAG))
111 memcpy(ptr, &cf->data[0], cf->can_dlc);
112 memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
113 buf, DPRAM_TX_SIZE);
114 if (++fifo_wr >= DPRAM_TX_CNT)
115 fifo_wr = 0;
116 iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
117 card->tx.last_bus = priv->index;
118 ++card->tx.pending;
119 ++priv->tx.pending;
120 can_put_echo_skb(skb, dev, priv->tx.echo_put);
121 ++priv->tx.echo_put;
122 if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
123 priv->tx.echo_put = 0;
124 /* can_put_echo_skb() saves the skb, safe to return TX_OK */
125 ret = NETDEV_TX_OK;
126xmit_done:
127 spin_unlock(&card->spin);
128 if (card->tx.pending >= TXMAX) {
129 int j;
130 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
131 if (card->net[j])
132 netif_stop_queue(card->net[j]);
133 }
134 }
135 if (ret != NETDEV_TX_OK)
136 netif_stop_queue(dev);
137
138 return ret;
139}
140
141/*
142 * shortcut for skb delivery
143 */
144int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
145 ktime_t ktime)
146{
147 struct sk_buff *skb;
148 struct can_frame *cf;
149
150 skb = alloc_can_skb(netdev, &cf);
151 if (!skb)
152 return -ENOMEM;
153 memcpy(cf, msg, sizeof(*msg));
154 skb->tstamp = ktime;
155 return netif_rx(skb);
156}
157
158/*
159 * softing_handle_1
 160 * pop 1 entry from the DPRAM queue and process it
161 */
162static int softing_handle_1(struct softing *card)
163{
164 struct net_device *netdev;
165 struct softing_priv *priv;
166 ktime_t ktime;
167 struct can_frame msg;
168 int cnt = 0, lost_msg;
169 uint8_t fifo_rd, fifo_wr, cmd;
170 uint8_t *ptr;
171 uint32_t tmp_u32;
172 uint8_t buf[DPRAM_RX_SIZE];
173
174 memset(&msg, 0, sizeof(msg));
175 /* test for lost msgs */
176 lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
177 if (lost_msg) {
178 int j;
179 /* reset condition */
180 iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
181 /* prepare msg */
182 msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
183 msg.can_dlc = CAN_ERR_DLC;
184 msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
185 /*
 186	 * service all busses; we don't know which one it applied to,
 187	 * but only service busses that are online
188 */
189 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
190 netdev = card->net[j];
191 if (!netdev)
192 continue;
193 if (!canif_is_active(netdev))
194 /* a dead bus has no overflows */
195 continue;
196 ++netdev->stats.rx_over_errors;
197 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
198 }
199 /* prepare for other use */
200 memset(&msg, 0, sizeof(msg));
201 ++cnt;
202 }
203
204 fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
205 fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
206
207 if (++fifo_rd >= DPRAM_RX_CNT)
208 fifo_rd = 0;
209 if (fifo_wr == fifo_rd)
210 return cnt;
211
212 memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
213 DPRAM_RX_SIZE);
214 mb();
215 /* trigger dual port RAM */
216 iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
217
218 ptr = buf;
219 cmd = *ptr++;
220 if (cmd == 0xff)
 221		/* not quite useful, the card has probably gone away */
222 return 0;
223 netdev = card->net[0];
224 if (cmd & CMD_BUS2)
225 netdev = card->net[1];
226 priv = netdev_priv(netdev);
227
228 if (cmd & CMD_ERR) {
229 uint8_t can_state, state;
230
231 state = *ptr++;
232
233 msg.can_id = CAN_ERR_FLAG;
234 msg.can_dlc = CAN_ERR_DLC;
235
236 if (state & SF_MASK_BUSOFF) {
237 can_state = CAN_STATE_BUS_OFF;
238 msg.can_id |= CAN_ERR_BUSOFF;
239 state = STATE_BUSOFF;
240 } else if (state & SF_MASK_EPASSIVE) {
241 can_state = CAN_STATE_ERROR_PASSIVE;
242 msg.can_id |= CAN_ERR_CRTL;
243 msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
244 state = STATE_EPASSIVE;
245 } else {
246 can_state = CAN_STATE_ERROR_ACTIVE;
247 msg.can_id |= CAN_ERR_CRTL;
248 state = STATE_EACTIVE;
249 }
250 /* update DPRAM */
251 iowrite8(state, &card->dpram[priv->index ?
252 DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
253 /* timestamp */
254 tmp_u32 = le32_to_cpup((void *)ptr);
255 ptr += 4;
256 ktime = softing_raw2ktime(card, tmp_u32);
257
258 ++netdev->stats.rx_errors;
259 /* update internal status */
260 if (can_state != priv->can.state) {
261 priv->can.state = can_state;
262 if (can_state == CAN_STATE_ERROR_PASSIVE)
263 ++priv->can.can_stats.error_passive;
264 else if (can_state == CAN_STATE_BUS_OFF) {
265 /* this calls can_close_cleanup() */
266 can_bus_off(netdev);
267 netif_stop_queue(netdev);
268 }
269 /* trigger socketcan */
270 softing_netdev_rx(netdev, &msg, ktime);
271 }
272
273 } else {
274 if (cmd & CMD_RTR)
275 msg.can_id |= CAN_RTR_FLAG;
276 msg.can_dlc = get_can_dlc(*ptr++);
277 if (cmd & CMD_XTD) {
278 msg.can_id |= CAN_EFF_FLAG;
279 msg.can_id |= le32_to_cpup((void *)ptr);
280 ptr += 4;
281 } else {
282 msg.can_id |= le16_to_cpup((void *)ptr);
283 ptr += 2;
284 }
285 /* timestamp */
286 tmp_u32 = le32_to_cpup((void *)ptr);
287 ptr += 4;
288 ktime = softing_raw2ktime(card, tmp_u32);
289 if (!(msg.can_id & CAN_RTR_FLAG))
290 memcpy(&msg.data[0], ptr, 8);
291 ptr += 8;
292 /* update socket */
293 if (cmd & CMD_ACK) {
294 /* acknowledge, was tx msg */
295 struct sk_buff *skb;
296 skb = priv->can.echo_skb[priv->tx.echo_get];
297 if (skb)
298 skb->tstamp = ktime;
299 can_get_echo_skb(netdev, priv->tx.echo_get);
300 ++priv->tx.echo_get;
301 if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
302 priv->tx.echo_get = 0;
303 if (priv->tx.pending)
304 --priv->tx.pending;
305 if (card->tx.pending)
306 --card->tx.pending;
307 ++netdev->stats.tx_packets;
308 if (!(msg.can_id & CAN_RTR_FLAG))
309 netdev->stats.tx_bytes += msg.can_dlc;
310 } else {
311 int ret;
312
313 ret = softing_netdev_rx(netdev, &msg, ktime);
314 if (ret == NET_RX_SUCCESS) {
315 ++netdev->stats.rx_packets;
316 if (!(msg.can_id & CAN_RTR_FLAG))
317 netdev->stats.rx_bytes += msg.can_dlc;
318 } else {
319 ++netdev->stats.rx_dropped;
320 }
321 }
322 }
323 ++cnt;
324 return cnt;
325}
326
327/*
328 * real interrupt handler
329 */
330static irqreturn_t softing_irq_thread(int irq, void *dev_id)
331{
332 struct softing *card = (struct softing *)dev_id;
333 struct net_device *netdev;
334 struct softing_priv *priv;
335 int j, offset, work_done;
336
337 work_done = 0;
338 spin_lock_bh(&card->spin);
339 while (softing_handle_1(card) > 0) {
340 ++card->irq.svc_count;
341 ++work_done;
342 }
343 spin_unlock_bh(&card->spin);
 344	/* resume tx queues */
345 offset = card->tx.last_bus;
346 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
347 if (card->tx.pending >= TXMAX)
348 break;
349 netdev = card->net[(j + offset + 1) % card->pdat->nbus];
350 if (!netdev)
351 continue;
352 priv = netdev_priv(netdev);
353 if (!canif_is_active(netdev))
354 /* it makes no sense to wake dead busses */
355 continue;
356 if (priv->tx.pending >= TX_ECHO_SKB_MAX)
357 continue;
358 ++work_done;
359 netif_wake_queue(netdev);
360 }
361 return work_done ? IRQ_HANDLED : IRQ_NONE;
362}
363
364/*
365 * interrupt routines:
366 * schedule the 'real interrupt handler'
367 */
368static irqreturn_t softing_irq_v2(int irq, void *dev_id)
369{
370 struct softing *card = (struct softing *)dev_id;
371 uint8_t ir;
372
373 ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
374 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
375 return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
376}
377
378static irqreturn_t softing_irq_v1(int irq, void *dev_id)
379{
380 struct softing *card = (struct softing *)dev_id;
381 uint8_t ir;
382
383 ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
384 iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
385 return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
386}
387
388/*
389 * netdev/candev inter-operability
390 */
391static int softing_netdev_open(struct net_device *ndev)
392{
393 int ret;
394
395 /* check or determine and set bittime */
396 ret = open_candev(ndev);
397 if (!ret)
398 ret = softing_startstop(ndev, 1);
399 return ret;
400}
401
402static int softing_netdev_stop(struct net_device *ndev)
403{
404 int ret;
405
406 netif_stop_queue(ndev);
407
408 /* softing cycle does close_candev() */
409 ret = softing_startstop(ndev, 0);
410 return ret;
411}
412
413static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
414{
415 int ret;
416
417 switch (mode) {
418 case CAN_MODE_START:
419 /* softing_startstop does close_candev() */
420 ret = softing_startstop(ndev, 1);
421 return ret;
422 case CAN_MODE_STOP:
423 case CAN_MODE_SLEEP:
424 return -EOPNOTSUPP;
425 }
426 return 0;
427}
428
429/*
430 * Softing device management helpers
431 */
432int softing_enable_irq(struct softing *card, int enable)
433{
434 int ret;
435
436 if (!card->irq.nr) {
437 return 0;
438 } else if (card->irq.requested && !enable) {
439 free_irq(card->irq.nr, card);
440 card->irq.requested = 0;
441 } else if (!card->irq.requested && enable) {
442 ret = request_threaded_irq(card->irq.nr,
443 (card->pdat->generation >= 2) ?
444 softing_irq_v2 : softing_irq_v1,
445 softing_irq_thread, IRQF_SHARED,
446 dev_name(&card->pdev->dev), card);
447 if (ret) {
448 dev_alert(&card->pdev->dev,
449 "request_threaded_irq(%u) failed\n",
450 card->irq.nr);
451 return ret;
452 }
453 card->irq.requested = 1;
454 }
455 return 0;
456}
457
458static void softing_card_shutdown(struct softing *card)
459{
460 int fw_up = 0;
461
462 if (mutex_lock_interruptible(&card->fw.lock))
463 /* return -ERESTARTSYS */;
464 fw_up = card->fw.up;
465 card->fw.up = 0;
466
467 if (card->irq.requested && card->irq.nr) {
468 free_irq(card->irq.nr, card);
469 card->irq.requested = 0;
470 }
471 if (fw_up) {
472 if (card->pdat->enable_irq)
473 card->pdat->enable_irq(card->pdev, 0);
474 softing_set_reset_dpram(card);
475 if (card->pdat->reset)
476 card->pdat->reset(card->pdev, 1);
477 }
478 mutex_unlock(&card->fw.lock);
479}
480
481static __devinit int softing_card_boot(struct softing *card)
482{
483 int ret, j;
484 static const uint8_t stream[] = {
485 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
486 unsigned char back[sizeof(stream)];
487
488 if (mutex_lock_interruptible(&card->fw.lock))
489 return -ERESTARTSYS;
490 if (card->fw.up) {
491 mutex_unlock(&card->fw.lock);
492 return 0;
493 }
494 /* reset board */
495 if (card->pdat->enable_irq)
496 card->pdat->enable_irq(card->pdev, 1);
497 /* boot card */
498 softing_set_reset_dpram(card);
499 if (card->pdat->reset)
500 card->pdat->reset(card->pdev, 1);
501 for (j = 0; (j + sizeof(stream)) < card->dpram_size;
502 j += sizeof(stream)) {
503
504 memcpy_toio(&card->dpram[j], stream, sizeof(stream));
505 /* flush IO cache */
506 mb();
507 memcpy_fromio(back, &card->dpram[j], sizeof(stream));
508
509 if (!memcmp(back, stream, sizeof(stream)))
510 continue;
511 /* memory is not equal */
512 dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
513 ret = -EIO;
514 goto failed;
515 }
516 wmb();
517 /* load boot firmware */
518 ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
519 card->dpram_size,
520 card->pdat->boot.offs - card->pdat->boot.addr);
521 if (ret < 0)
522 goto failed;
523 /* load loader firmware */
524 ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
525 card->dpram_size,
526 card->pdat->load.offs - card->pdat->load.addr);
527 if (ret < 0)
528 goto failed;
529
530 if (card->pdat->reset)
531 card->pdat->reset(card->pdev, 0);
532 softing_clr_reset_dpram(card);
533 ret = softing_bootloader_command(card, 0, "card boot");
534 if (ret < 0)
535 goto failed;
536 ret = softing_load_app_fw(card->pdat->app.fw, card);
537 if (ret < 0)
538 goto failed;
539
540 ret = softing_chip_poweron(card);
541 if (ret < 0)
542 goto failed;
543
544 card->fw.up = 1;
545 mutex_unlock(&card->fw.lock);
546 return 0;
547failed:
548 card->fw.up = 0;
549 if (card->pdat->enable_irq)
550 card->pdat->enable_irq(card->pdev, 0);
551 softing_set_reset_dpram(card);
552 if (card->pdat->reset)
553 card->pdat->reset(card->pdev, 1);
554 mutex_unlock(&card->fw.lock);
555 return ret;
556}
557
558/*
559 * netdev sysfs
560 */
561static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
562 char *buf)
563{
564 struct net_device *ndev = to_net_dev(dev);
565 struct softing_priv *priv = netdev2softing(ndev);
566
567 return sprintf(buf, "%i\n", priv->index);
568}
569
570static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
571 char *buf)
572{
573 struct net_device *ndev = to_net_dev(dev);
574 struct softing_priv *priv = netdev2softing(ndev);
575
576 return sprintf(buf, "%i\n", priv->chip);
577}
578
579static ssize_t show_output(struct device *dev, struct device_attribute *attr,
580 char *buf)
581{
582 struct net_device *ndev = to_net_dev(dev);
583 struct softing_priv *priv = netdev2softing(ndev);
584
585 return sprintf(buf, "0x%02x\n", priv->output);
586}
587
588static ssize_t store_output(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 struct net_device *ndev = to_net_dev(dev);
592 struct softing_priv *priv = netdev2softing(ndev);
593 struct softing *card = priv->card;
594 unsigned long val;
595 int ret;
596
597 ret = strict_strtoul(buf, 0, &val);
598 if (ret < 0)
599 return ret;
600 val &= 0xFF;
601
602 ret = mutex_lock_interruptible(&card->fw.lock);
603 if (ret)
604 return -ERESTARTSYS;
605 if (netif_running(ndev)) {
606 mutex_unlock(&card->fw.lock);
607 return -EBUSY;
608 }
609 priv->output = val;
610 mutex_unlock(&card->fw.lock);
611 return count;
612}
613
614static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
615static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
616static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
617
618static const struct attribute *const netdev_sysfs_attrs[] = {
619 &dev_attr_channel.attr,
620 &dev_attr_chip.attr,
621 &dev_attr_output.attr,
622 NULL,
623};
624static const struct attribute_group netdev_sysfs_group = {
625 .name = NULL,
626 .attrs = (struct attribute **)netdev_sysfs_attrs,
627};
628
629static const struct net_device_ops softing_netdev_ops = {
630 .ndo_open = softing_netdev_open,
631 .ndo_stop = softing_netdev_stop,
632 .ndo_start_xmit = softing_netdev_start_xmit,
633};
634
635static const struct can_bittiming_const softing_btr_const = {
636 .tseg1_min = 1,
637 .tseg1_max = 16,
638 .tseg2_min = 1,
639 .tseg2_max = 8,
640 .sjw_max = 4, /* overruled */
641 .brp_min = 1,
642 .brp_max = 32, /* overruled */
643 .brp_inc = 1,
644};
645
646
647static __devinit struct net_device *softing_netdev_create(struct softing *card,
648 uint16_t chip_id)
649{
650 struct net_device *netdev;
651 struct softing_priv *priv;
652
653 netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
654 if (!netdev) {
655 dev_alert(&card->pdev->dev, "alloc_candev failed\n");
656 return NULL;
657 }
658 priv = netdev_priv(netdev);
659 priv->netdev = netdev;
660 priv->card = card;
661 memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
662 priv->btr_const.brp_max = card->pdat->max_brp;
663 priv->btr_const.sjw_max = card->pdat->max_sjw;
664 priv->can.bittiming_const = &priv->btr_const;
665 priv->can.clock.freq = 8000000;
666 priv->chip = chip_id;
667 priv->output = softing_default_output(netdev);
668 SET_NETDEV_DEV(netdev, &card->pdev->dev);
669
670 netdev->flags |= IFF_ECHO;
671 netdev->netdev_ops = &softing_netdev_ops;
672 priv->can.do_set_mode = softing_candev_set_mode;
673 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
674
675 return netdev;
676}
677
678static __devinit int softing_netdev_register(struct net_device *netdev)
679{
680 int ret;
681
682 netdev->sysfs_groups[0] = &netdev_sysfs_group;
683 ret = register_candev(netdev);
684 if (ret) {
685 dev_alert(&netdev->dev, "register failed\n");
686 return ret;
687 }
688 return 0;
689}
690
691static void softing_netdev_cleanup(struct net_device *netdev)
692{
693 unregister_candev(netdev);
694 free_candev(netdev);
695}
696
697/*
698 * sysfs for Platform device
699 */
700#define DEV_ATTR_RO(name, member) \
701static ssize_t show_##name(struct device *dev, \
702 struct device_attribute *attr, char *buf) \
703{ \
704 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
705 return sprintf(buf, "%u\n", card->member); \
706} \
707static DEVICE_ATTR(name, 0444, show_##name, NULL)
708
709#define DEV_ATTR_RO_STR(name, member) \
710static ssize_t show_##name(struct device *dev, \
711 struct device_attribute *attr, char *buf) \
712{ \
713 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
714 return sprintf(buf, "%s\n", card->member); \
715} \
716static DEVICE_ATTR(name, 0444, show_##name, NULL)
717
718DEV_ATTR_RO(serial, id.serial);
719DEV_ATTR_RO_STR(firmware, pdat->app.fw);
720DEV_ATTR_RO(firmware_version, id.fw_version);
721DEV_ATTR_RO_STR(hardware, pdat->name);
722DEV_ATTR_RO(hardware_version, id.hw_version);
723DEV_ATTR_RO(license, id.license);
724DEV_ATTR_RO(frequency, id.freq);
725DEV_ATTR_RO(txpending, tx.pending);
726
727static struct attribute *softing_pdev_attrs[] = {
728 &dev_attr_serial.attr,
729 &dev_attr_firmware.attr,
730 &dev_attr_firmware_version.attr,
731 &dev_attr_hardware.attr,
732 &dev_attr_hardware_version.attr,
733 &dev_attr_license.attr,
734 &dev_attr_frequency.attr,
735 &dev_attr_txpending.attr,
736 NULL,
737};
738
739static const struct attribute_group softing_pdev_group = {
740 .name = NULL,
741 .attrs = softing_pdev_attrs,
742};
743
744/*
745 * platform driver
746 */
747static __devexit int softing_pdev_remove(struct platform_device *pdev)
748{
749 struct softing *card = platform_get_drvdata(pdev);
750 int j;
751
 752	/* first, disable the card */
753 softing_card_shutdown(card);
754
755 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
756 if (!card->net[j])
757 continue;
758 softing_netdev_cleanup(card->net[j]);
759 card->net[j] = NULL;
760 }
761 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
762
763 iounmap(card->dpram);
764 kfree(card);
765 return 0;
766}
767
768static __devinit int softing_pdev_probe(struct platform_device *pdev)
769{
770 const struct softing_platform_data *pdat = pdev->dev.platform_data;
771 struct softing *card;
772 struct net_device *netdev;
773 struct softing_priv *priv;
774 struct resource *pres;
775 int ret;
776 int j;
777
778 if (!pdat) {
779 dev_warn(&pdev->dev, "no platform data\n");
780 return -EINVAL;
781 }
782 if (pdat->nbus > ARRAY_SIZE(card->net)) {
783 dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
784 return -EINVAL;
785 }
786
787 card = kzalloc(sizeof(*card), GFP_KERNEL);
788 if (!card)
789 return -ENOMEM;
790 card->pdat = pdat;
791 card->pdev = pdev;
792 platform_set_drvdata(pdev, card);
793 mutex_init(&card->fw.lock);
794 spin_lock_init(&card->spin);
795
796 ret = -EINVAL;
797 pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
798 if (!pres)
 799		goto platform_resource_failed;
800 card->dpram_phys = pres->start;
801 card->dpram_size = pres->end - pres->start + 1;
802 card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
803 if (!card->dpram) {
804 dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
805 goto ioremap_failed;
806 }
807
808 pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
809 if (pres)
810 card->irq.nr = pres->start;
811
812 /* reset card */
813 ret = softing_card_boot(card);
814 if (ret < 0) {
815 dev_alert(&pdev->dev, "failed to boot\n");
816 goto boot_failed;
817 }
818
 819	/* only now, the chips are known */
820 card->id.freq = card->pdat->freq;
821
822 ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
823 if (ret < 0) {
824 dev_alert(&card->pdev->dev, "sysfs failed\n");
825 goto sysfs_failed;
826 }
827
828 ret = -ENOMEM;
829 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
830 card->net[j] = netdev =
831 softing_netdev_create(card, card->id.chip[j]);
832 if (!netdev) {
 833			dev_alert(&pdev->dev, "failed to make can[%i]\n", j);
834 goto netdev_failed;
835 }
836 priv = netdev_priv(card->net[j]);
837 priv->index = j;
838 ret = softing_netdev_register(netdev);
839 if (ret) {
840 free_candev(netdev);
841 card->net[j] = NULL;
842 dev_alert(&card->pdev->dev,
843 "failed to register can[%i]\n", j);
844 goto netdev_failed;
845 }
846 }
847 dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
848 return 0;
849
850netdev_failed:
851 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
852 if (!card->net[j])
853 continue;
854 softing_netdev_cleanup(card->net[j]);
855 }
856 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
857sysfs_failed:
858 softing_card_shutdown(card);
859boot_failed:
860 iounmap(card->dpram);
861ioremap_failed:
862platform_resource_failed:
863 kfree(card);
864 return ret;
865}
866
867static struct platform_driver softing_driver = {
868 .driver = {
869 .name = "softing",
870 .owner = THIS_MODULE,
871 },
872 .probe = softing_pdev_probe,
873 .remove = __devexit_p(softing_pdev_remove),
874};
875
876MODULE_ALIAS("platform:softing");
877
878static int __init softing_start(void)
879{
880 return platform_driver_register(&softing_driver);
881}
882
883static void __exit softing_stop(void)
884{
885 platform_driver_unregister(&softing_driver);
886}
887
888module_init(softing_start);
889module_exit(softing_stop);
890
891MODULE_DESCRIPTION("Softing DPRAM CAN driver");
892MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
893MODULE_LICENSE("GPL v2");
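
The platform driver above registers under the name "softing" and, in softing_pdev_probe(), expects its DPRAM window and interrupt as platform resources plus a struct softing_platform_data as platform data. A minimal, hypothetical sketch of how a carrier driver could hand these to the core follows; the base address, IRQ number and the pdat argument are illustrative assumptions, not part of this patch, and error unwinding is kept to the bare minimum.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include "softing_platform.h"

/* assumed example resources: a 4 KiB DPRAM window and one interrupt line */
static struct resource example_softing_res[] = {
	{
		.start	= 0xd0000000,	/* assumed DPRAM base */
		.end	= 0xd0000fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 42,		/* assumed interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static int example_attach_softing(const struct softing_platform_data *pdat)
{
	struct platform_device *pdev;
	int ret;

	/* create a device that softing_pdev_probe() will be matched against */
	pdev = platform_device_alloc("softing", -1);
	if (!pdev)
		return -ENOMEM;
	ret = platform_device_add_resources(pdev, example_softing_res,
					    ARRAY_SIZE(example_softing_res));
	if (!ret)
		ret = platform_device_add_data(pdev, pdat, sizeof(*pdat));
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);
	return ret;
}
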
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
1
2#include <linux/platform_device.h>
3
4#ifndef _SOFTING_DEVICE_H_
5#define _SOFTING_DEVICE_H_
6
7/* softing firmware directory prefix */
8#define fw_dir "softing-4.6/"
9
10struct softing_platform_data {
11 unsigned int manf;
12 unsigned int prod;
13 /*
14 * generation
15 * 1st with NEC or SJA1000
16 * 8bit, exclusive interrupt, ...
17 * 2nd only SJA1000
18 * 16bit, shared interrupt
19 */
20 int generation;
21 int nbus; /* # busses on device */
22 unsigned int freq; /* operating frequency in Hz */
23 unsigned int max_brp;
24 unsigned int max_sjw;
25 unsigned long dpram_size;
26 const char *name;
27 struct {
28 unsigned long offs;
29 unsigned long addr;
30 const char *fw;
31 } boot, load, app;
32 /*
33 * reset() function
34 * bring pdev in or out of reset, depending on value
35 */
36 int (*reset)(struct platform_device *pdev, int value);
37 int (*enable_irq)(struct platform_device *pdev, int value);
38};
39
40#endif
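
For illustration, a hypothetical carrier-side definition of the platform data described above might look as follows; every numeric value and firmware file name here is an assumed placeholder, not taken from this patch:

#include "softing_platform.h"

static int example_softing_reset(struct platform_device *pdev, int value)
{
	/* assert (value != 0) or release (value == 0) the card's reset line */
	return 0;
}

static const struct softing_platform_data example_softing_pdat = {
	.generation	= 2,			/* 16bit, shared interrupt */
	.nbus		= 2,
	.freq		= 16000000,		/* assumed 16 MHz */
	.max_brp	= 64,			/* assumed */
	.max_sjw	= 4,			/* assumed */
	.dpram_size	= 0x1000,		/* assumed 4 KiB DPRAM */
	.name		= "example softing card",
	.boot	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "boot.bin" },
	.load	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "load.bin" },
	.app	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "app.bin" },
	.reset		= example_softing_reset,
	.enable_irq	= NULL,			/* optional on this (assumed) card */
};
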
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2cbbf8..3437613f0454 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3204 int mac_off = 0; 3204 int mac_off = 0;
3205 3205
3206#if defined(CONFIG_OF) 3206#if defined(CONFIG_SPARC)
3207 const unsigned char *addr; 3207 const unsigned char *addr;
3208#endif 3208#endif
3209 3209
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
3354 if (found & VPD_FOUND_MAC) 3354 if (found & VPD_FOUND_MAC)
3355 goto done; 3355 goto done;
3356 3356
3357#if defined(CONFIG_OF) 3357#if defined(CONFIG_SPARC)
3358 addr = of_get_property(cp->of_node, "local-mac-address", NULL); 3358 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3359 if (addr != NULL) { 3359 if (addr != NULL) {
3360 memcpy(dev_addr, addr, 6); 3360 memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5031 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 5031 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5032 cassini_debug; 5032 cassini_debug;
5033 5033
5034#if defined(CONFIG_OF) 5034#if defined(CONFIG_SPARC)
5035 cp->of_node = pci_device_to_OF_node(pdev); 5035 cp->of_node = pci_device_to_OF_node(pdev);
5036#endif 5036#endif
5037 5037
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566f..7ff170cbc7dc 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
700{ 700{
701 int i; 701 int i;
702 u32 *page_table = dma->pgtbl; 702 __le32 *page_table = (__le32 *) dma->pgtbl;
703 703
704 for (i = 0; i < dma->num_pages; i++) { 704 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in big endian format. */ 705 /* Each entry needs to be in big endian format. */
706 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 706 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
707 page_table++; 707 page_table++;
708 *page_table = (u32) dma->pg_map_arr[i]; 708 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
709 page_table++; 709 page_table++;
710 } 710 }
711} 711}
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
714{ 714{
715 int i; 715 int i;
716 u32 *page_table = dma->pgtbl; 716 __le32 *page_table = (__le32 *) dma->pgtbl;
717 717
718 for (i = 0; i < dma->num_pages; i++) { 718 for (i = 0; i < dma->num_pages; i++) {
719 /* Each entry needs to be in little endian format. */ 719 /* Each entry needs to be in little endian format. */
720 *page_table = dma->pg_map_arr[i] & 0xffffffff; 720 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
721 page_table++; 721 page_table++;
722 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 722 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
723 page_table++; 723 page_table++;
724 } 724 }
725} 725}
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3f..ec35d458102c 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2710,6 +2710,8 @@ static int cxgb_open(struct net_device *dev)
2710 struct port_info *pi = netdev_priv(dev); 2710 struct port_info *pi = netdev_priv(dev);
2711 struct adapter *adapter = pi->adapter; 2711 struct adapter *adapter = pi->adapter;
2712 2712
2713 netif_carrier_off(dev);
2714
2713 if (!(adapter->flags & FULL_INIT_DONE)) { 2715 if (!(adapter->flags & FULL_INIT_DONE)) {
2714 err = cxgb_up(adapter); 2716 err = cxgb_up(adapter);
2715 if (err < 0) 2717 if (err < 0)
@@ -3661,7 +3663,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3661 pi->xact_addr_filt = -1; 3663 pi->xact_addr_filt = -1;
3662 pi->rx_offload = RX_CSO; 3664 pi->rx_offload = RX_CSO;
3663 pi->port_id = i; 3665 pi->port_id = i;
3664 netif_carrier_off(netdev);
3665 netdev->irq = pdev->irq; 3666 netdev->irq = pdev->irq;
3666 3667
3667 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3668 netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4fd..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
1094 } 1094 }
1095 } 1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */ 1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { 1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); 1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 } 1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); 1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
1103 /* 1103 /*
1104 ** Update entry information 1104 ** Update entry information
1105 */ 1105 */
1106 lp->rx_new = (++lp->rx_new) & lp->rxRingMask; 1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 } 1107 }
1108 1108
1109 return 0; 1109 return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
1148 } 1148 }
1149 1149
1150 /* Update all the pointers */ 1150 /* Update all the pointers */
1151 lp->tx_old = (++lp->tx_old) & lp->txRingMask; 1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 } 1152 }
1153 1153
1154 return 0; 1154 return 0;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
1753 1753
1754 /* Free all the skbuffs in the queue. */ 1754 /* Free all the skbuffs in the queue. */
1755 for (i = 0; i < RX_RING_SIZE; i++) { 1755 for (i = 0; i < RX_RING_SIZE; i++) {
1756 np->rx_ring[i].status = 0;
1757 np->rx_ring[i].fraginfo = 0;
1758 skb = np->rx_skbuff[i]; 1756 skb = np->rx_skbuff[i];
1759 if (skb) { 1757 if (skb) {
1760 pci_unmap_single(np->pdev, 1758 pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
1763 dev_kfree_skb (skb); 1761 dev_kfree_skb (skb);
1764 np->rx_skbuff[i] = NULL; 1762 np->rx_skbuff[i] = NULL;
1765 } 1763 }
1764 np->rx_ring[i].status = 0;
1765 np->rx_ring[i].fraginfo = 0;
1766 } 1766 }
1767 for (i = 0; i < TX_RING_SIZE; i++) { 1767 for (i = 0; i < TX_RING_SIZE; i++) {
1768 skb = np->tx_skbuff[i]; 1768 skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b897..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
124 case M88E1000_I_PHY_ID: 124 case M88E1000_I_PHY_ID:
125 case M88E1011_I_PHY_ID: 125 case M88E1011_I_PHY_ID:
126 case M88E1111_I_PHY_ID: 126 case M88E1111_I_PHY_ID:
127 case M88E1118_E_PHY_ID:
127 hw->phy_type = e1000_phy_m88; 128 hw->phy_type = e1000_phy_m88;
128 break; 129 break;
129 case IGP01E1000_I_PHY_ID: 130 case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3222 break; 3223 break;
3223 case e1000_ce4100: 3224 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) || 3225 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID)) 3226 (hw->phy_id == RTL8201N_PHY_ID) ||
3227 (hw->phy_id == M88E1118_E_PHY_ID))
3226 match = true; 3228 match = true;
3227 break; 3229 break;
3228 case e1000_82541: 3230 case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6c..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2918#define M88E1011_I_REV_4 0x04 2918#define M88E1011_I_REV_4 0x04
2919#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2920#define M88E1118_E_PHY_ID 0x01410E40
2920#define L1LXT971A_PHY_ID 0x001378E0 2921#define L1LXT971A_PHY_ID 0x001378E0
2921 2922
2922#define RTL8211B_PHY_ID 0x001CC910 2923#define RTL8211B_PHY_ID 0x001CC910
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index de69c54301c1..bfab14092d2c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data)
3478 struct e1000_hw *hw = &adapter->hw; 3478 struct e1000_hw *hw = &adapter->hw;
3479 u32 icr = er32(ICR); 3479 u32 icr = er32(ICR);
3480 3480
3481 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3481 if (unlikely((!icr)))
3482 return IRQ_NONE; /* Not our interrupt */ 3482 return IRQ_NONE; /* Not our interrupt */
3483 3483
3484 /*
3485 * we might have caused the interrupt, but the above
3486 * read cleared it, and just in case the driver is
3487 * down there is nothing to do so return handled
3488 */
3489 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3490 return IRQ_HANDLED;
3491
3484 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3492 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3485 hw->get_link_status = 1; 3493 hw->get_link_status = 1;
3486 /* guard against interrupt when we're going down */ 3494 /* guard against interrupt when we're going down */
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 1397da118f0d..89a69035e538 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1310 * apply workaround for hardware errata documented in errata 1310 * apply workaround for hardware errata documented in errata
1311 * docs Fixes issue where some error prone or unreliable PCIe 1311 * docs Fixes issue where some error prone or unreliable PCIe
1312 * completions are occurring, particularly with ASPM enabled. 1312 * completions are occurring, particularly with ASPM enabled.
1313 * Without fix, issue can cause tx timeouts. 1313 * Without fix, issue can cause Tx timeouts.
1314 */ 1314 */
1315 reg = er32(GCR2); 1315 reg = er32(GCR2);
1316 reg |= 1; 1316 reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c91369f35..28519acacd2d 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/1000 Linux driver 3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2008 Intel Corporation. 4# Copyright(c) 1999 - 2011 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2e0b7c..13149983d07e 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 5255be753746..e610e1369053 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e45a61c8930a..2fefa820302b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f8ed03dab9b1..fa08b6336cfb 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index e774380c7cec..bc0860a598c9 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ enum e1e_registers {
102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ 102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ 103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) 104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
105 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 105 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
106 106
107/* Convenience macros 107/* Convenience macros
108 * 108 *
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 5bb65b7382db..fb46974cfec1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index ff2872153b21..68aa1749bf66 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
533 mac->autoneg_failed = 1; 533 mac->autoneg_failed = 1;
534 return 0; 534 return 0;
535 } 535 }
536 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); 536 e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
537 537
538 /* Disable auto-negotiation in the TXCW register */ 538 /* Disable auto-negotiation in the TXCW register */
539 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 539 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
556 * and disable forced link in the Device Control register 556 * and disable forced link in the Device Control register
557 * in an attempt to auto-negotiate with our link partner. 557 * in an attempt to auto-negotiate with our link partner.
558 */ 558 */
559 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); 559 e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
560 ew32(TXCW, mac->txcw); 560 ew32(TXCW, mac->txcw);
561 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 561 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
562 562
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
598 mac->autoneg_failed = 1; 598 mac->autoneg_failed = 1;
599 return 0; 599 return 0;
600 } 600 }
601 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); 601 e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
602 602
603 /* Disable auto-negotiation in the TXCW register */ 603 /* Disable auto-negotiation in the TXCW register */
604 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 604 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
621 * and disable forced link in the Device Control register 621 * and disable forced link in the Device Control register
622 * in an attempt to auto-negotiate with our link partner. 622 * in an attempt to auto-negotiate with our link partner.
623 */ 623 */
624 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); 624 e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
625 ew32(TXCW, mac->txcw); 625 ew32(TXCW, mac->txcw);
626 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 626 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
627 627
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
800 * The possible values of the "fc" parameter are: 800 * The possible values of the "fc" parameter are:
801 * 0: Flow control is completely disabled 801 * 0: Flow control is completely disabled
802 * 1: Rx flow control is enabled (we can receive pause frames, 802 * 1: Rx flow control is enabled (we can receive pause frames,
803 * but not send pause frames). 803 * but not send pause frames).
804 * 2: Tx flow control is enabled (we can send pause frames but we 804 * 2: Tx flow control is enabled (we can send pause frames but we
805 * do not support receiving pause frames). 805 * do not support receiving pause frames).
806 * 3: Both Rx and Tx flow control (symmetric) are enabled. 806 * 3: Both Rx and Tx flow control (symmetric) are enabled.
807 */ 807 */
808 switch (hw->fc.current_mode) { 808 switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
1031 * The possible values of the "fc" parameter are: 1031 * The possible values of the "fc" parameter are:
1032 * 0: Flow control is completely disabled 1032 * 0: Flow control is completely disabled
1033 * 1: Rx flow control is enabled (we can receive pause 1033 * 1: Rx flow control is enabled (we can receive pause
1034 * frames but not send pause frames). 1034 * frames but not send pause frames).
1035 * 2: Tx flow control is enabled (we can send pause frames 1035 * 2: Tx flow control is enabled (we can send pause frames
1036 * frames but we do not receive pause frames). 1036 * frames but we do not receive pause frames).
1037 * 3: Both Rx and Tx flow control (symmetric) is enabled. 1037 * 3: Both Rx and Tx flow control (symmetric) is enabled.
1038 * other: No other values should be possible at this point. 1038 * other: No other values should be possible at this point.
1039 */ 1039 */
@@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1189 } else { 1189 } else {
1190 hw->fc.current_mode = e1000_fc_rx_pause; 1190 hw->fc.current_mode = e1000_fc_rx_pause;
1191 e_dbg("Flow Control = " 1191 e_dbg("Flow Control = "
1192 "RX PAUSE frames only.\r\n"); 1192 "Rx PAUSE frames only.\r\n");
1193 } 1193 }
1194 } 1194 }
1195 /* 1195 /*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fa5b60452547..3065870cf2a7 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@ struct e1000_reg_info {
77 char *name; 77 char *name;
78}; 78};
79 79
80#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ 80#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
81#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ 81#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
82#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ 82#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
83#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ 83#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
84#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ 84#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
85 85
86#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ 86#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
87#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ 87#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
88#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ 88#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
89#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ 89#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
90#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ 90#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
91 91
92static const struct e1000_reg_info e1000_reg_info_tbl[] = { 92static const struct e1000_reg_info e1000_reg_info_tbl[] = {
93 93
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
99 /* Interrupt Registers */ 99 /* Interrupt Registers */
100 {E1000_ICR, "ICR"}, 100 {E1000_ICR, "ICR"},
101 101
102 /* RX Registers */ 102 /* Rx Registers */
103 {E1000_RCTL, "RCTL"}, 103 {E1000_RCTL, "RCTL"},
104 {E1000_RDLEN, "RDLEN"}, 104 {E1000_RDLEN, "RDLEN"},
105 {E1000_RDH, "RDH"}, 105 {E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
115 {E1000_RDFTS, "RDFTS"}, 115 {E1000_RDFTS, "RDFTS"},
116 {E1000_RDFPC, "RDFPC"}, 116 {E1000_RDFPC, "RDFPC"},
117 117
118 /* TX Registers */ 118 /* Tx Registers */
119 {E1000_TCTL, "TCTL"}, 119 {E1000_TCTL, "TCTL"},
120 {E1000_TDBAL, "TDBAL"}, 120 {E1000_TDBAL, "TDBAL"},
121 {E1000_TDBAH, "TDBAH"}, 121 {E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
160 break; 160 break;
161 default: 161 default:
162 printk(KERN_INFO "%-15s %08x\n", 162 printk(KERN_INFO "%-15s %08x\n",
163 reginfo->name, __er32(hw, reginfo->ofs)); 163 reginfo->name, __er32(hw, reginfo->ofs));
164 return; 164 return;
165 } 165 }
166 166
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
171 printk(KERN_CONT "\n"); 171 printk(KERN_CONT "\n");
172} 172}
173 173
174
175/* 174/*
176 * e1000e_dump - Print registers, tx-ring and rx-ring 175 * e1000e_dump - Print registers, Tx-ring and Rx-ring
177 */ 176 */
178static void e1000e_dump(struct e1000_adapter *adapter) 177static void e1000e_dump(struct e1000_adapter *adapter)
179{ 178{
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
182 struct e1000_reg_info *reginfo; 181 struct e1000_reg_info *reginfo;
183 struct e1000_ring *tx_ring = adapter->tx_ring; 182 struct e1000_ring *tx_ring = adapter->tx_ring;
184 struct e1000_tx_desc *tx_desc; 183 struct e1000_tx_desc *tx_desc;
185 struct my_u0 { u64 a; u64 b; } *u0; 184 struct my_u0 {
185 u64 a;
186 u64 b;
187 } *u0;
186 struct e1000_buffer *buffer_info; 188 struct e1000_buffer *buffer_info;
187 struct e1000_ring *rx_ring = adapter->rx_ring; 189 struct e1000_ring *rx_ring = adapter->rx_ring;
188 union e1000_rx_desc_packet_split *rx_desc_ps; 190 union e1000_rx_desc_packet_split *rx_desc_ps;
189 struct e1000_rx_desc *rx_desc; 191 struct e1000_rx_desc *rx_desc;
190 struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; 192 struct my_u1 {
193 u64 a;
194 u64 b;
195 u64 c;
196 u64 d;
197 } *u1;
191 u32 staterr; 198 u32 staterr;
192 int i = 0; 199 int i = 0;
193 200
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
198 if (netdev) { 205 if (netdev) {
199 dev_info(&adapter->pdev->dev, "Net device Info\n"); 206 dev_info(&adapter->pdev->dev, "Net device Info\n");
200 printk(KERN_INFO "Device Name state " 207 printk(KERN_INFO "Device Name state "
201 "trans_start last_rx\n"); 208 "trans_start last_rx\n");
202 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", 209 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
203 netdev->name, 210 netdev->name, netdev->state, netdev->trans_start,
204 netdev->state, 211 netdev->last_rx);
205 netdev->trans_start,
206 netdev->last_rx);
207 } 212 }
208 213
209 /* Print Registers */ 214 /* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
214 e1000_regdump(hw, reginfo); 219 e1000_regdump(hw, reginfo);
215 } 220 }
216 221
217 /* Print TX Ring Summary */ 222 /* Print Tx Ring Summary */
218 if (!netdev || !netif_running(netdev)) 223 if (!netdev || !netif_running(netdev))
219 goto exit; 224 goto exit;
220 225
221 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 226 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
222 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" 227 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
223 " leng ntw timestamp\n"); 228 " leng ntw timestamp\n");
224 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 229 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
225 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 230 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
226 0, tx_ring->next_to_use, tx_ring->next_to_clean, 231 0, tx_ring->next_to_use, tx_ring->next_to_clean,
227 (unsigned long long)buffer_info->dma, 232 (unsigned long long)buffer_info->dma,
228 buffer_info->length, 233 buffer_info->length,
229 buffer_info->next_to_watch, 234 buffer_info->next_to_watch,
230 (unsigned long long)buffer_info->time_stamp); 235 (unsigned long long)buffer_info->time_stamp);
231 236
232 /* Print TX Rings */ 237 /* Print Tx Ring */
233 if (!netif_msg_tx_done(adapter)) 238 if (!netif_msg_tx_done(adapter))
234 goto rx_ring_summary; 239 goto rx_ring_summary;
235 240
236 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); 241 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
237 242
238 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 243 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
239 * 244 *
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
263 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 268 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
264 */ 269 */
265 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" 270 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
266 " [bi->dma ] leng ntw timestamp bi->skb " 271 " [bi->dma ] leng ntw timestamp bi->skb "
267 "<-- Legacy format\n"); 272 "<-- Legacy format\n");
268 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" 273 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
269 " [bi->dma ] leng ntw timestamp bi->skb " 274 " [bi->dma ] leng ntw timestamp bi->skb "
270 "<-- Ext Context format\n"); 275 "<-- Ext Context format\n");
271 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" 276 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
272 " [bi->dma ] leng ntw timestamp bi->skb " 277 " [bi->dma ] leng ntw timestamp bi->skb "
273 "<-- Ext Data format\n"); 278 "<-- Ext Data format\n");
274 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 279 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
275 tx_desc = E1000_TX_DESC(*tx_ring, i); 280 tx_desc = E1000_TX_DESC(*tx_ring, i);
276 buffer_info = &tx_ring->buffer_info[i]; 281 buffer_info = &tx_ring->buffer_info[i];
277 u0 = (struct my_u0 *)tx_desc; 282 u0 = (struct my_u0 *)tx_desc;
278 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " 283 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
279 "%04X %3X %016llX %p", 284 "%04X %3X %016llX %p",
280 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : 285 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
281 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, 286 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
282 (unsigned long long)le64_to_cpu(u0->a), 287 (unsigned long long)le64_to_cpu(u0->a),
283 (unsigned long long)le64_to_cpu(u0->b), 288 (unsigned long long)le64_to_cpu(u0->b),
284 (unsigned long long)buffer_info->dma, 289 (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
296 301
297 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 302 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
298 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 303 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
299 16, 1, phys_to_virt(buffer_info->dma), 304 16, 1, phys_to_virt(buffer_info->dma),
300 buffer_info->length, true); 305 buffer_info->length, true);
301 } 306 }
302 307
303 /* Print RX Rings Summary */ 308 /* Print Rx Ring Summary */
304rx_ring_summary: 309rx_ring_summary:
305 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); 310 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
306 printk(KERN_INFO "Queue [NTU] [NTC]\n"); 311 printk(KERN_INFO "Queue [NTU] [NTC]\n");
307 printk(KERN_INFO " %5d %5X %5X\n", 0, 312 printk(KERN_INFO " %5d %5X %5X\n", 0,
308 rx_ring->next_to_use, rx_ring->next_to_clean); 313 rx_ring->next_to_use, rx_ring->next_to_clean);
309 314
310 /* Print RX Rings */ 315 /* Print Rx Ring */
311 if (!netif_msg_rx_status(adapter)) 316 if (!netif_msg_rx_status(adapter))
312 goto exit; 317 goto exit;
313 318
314 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 319 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
315 switch (adapter->rx_ps_pages) { 320 switch (adapter->rx_ps_pages) {
316 case 1: 321 case 1:
317 case 2: 322 case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
329 * +-----------------------------------------------------+ 334 * +-----------------------------------------------------+
330 */ 335 */
331 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " 336 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
332 "[buffer 1 63:0 ] " 337 "[buffer 1 63:0 ] "
333 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " 338 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
334 "[bi->skb] <-- Ext Pkt Split format\n"); 339 "[bi->skb] <-- Ext Pkt Split format\n");
335 /* [Extended] Receive Descriptor (Write-Back) Format 340 /* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
344 * 63 48 47 32 31 20 19 0 349 * 63 48 47 32 31 20 19 0
345 */ 350 */
346 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " 351 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
347 "[vl l0 ee es] " 352 "[vl l0 ee es] "
348 "[ l3 l2 l1 hs] [reserved ] ---------------- " 353 "[ l3 l2 l1 hs] [reserved ] ---------------- "
349 "[bi->skb] <-- Ext Rx Write-Back format\n"); 354 "[bi->skb] <-- Ext Rx Write-Back format\n");
350 for (i = 0; i < rx_ring->count; i++) { 355 for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
352 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); 357 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
353 u1 = (struct my_u1 *)rx_desc_ps; 358 u1 = (struct my_u1 *)rx_desc_ps;
354 staterr = 359 staterr =
355 le32_to_cpu(rx_desc_ps->wb.middle.status_error); 360 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
356 if (staterr & E1000_RXD_STAT_DD) { 361 if (staterr & E1000_RXD_STAT_DD) {
357 /* Descriptor Done */ 362 /* Descriptor Done */
358 printk(KERN_INFO "RWB[0x%03X] %016llX " 363 printk(KERN_INFO "RWB[0x%03X] %016llX "
359 "%016llX %016llX %016llX " 364 "%016llX %016llX %016llX "
360 "---------------- %p", i, 365 "---------------- %p", i,
361 (unsigned long long)le64_to_cpu(u1->a), 366 (unsigned long long)le64_to_cpu(u1->a),
362 (unsigned long long)le64_to_cpu(u1->b), 367 (unsigned long long)le64_to_cpu(u1->b),
363 (unsigned long long)le64_to_cpu(u1->c), 368 (unsigned long long)le64_to_cpu(u1->c),
364 (unsigned long long)le64_to_cpu(u1->d), 369 (unsigned long long)le64_to_cpu(u1->d),
365 buffer_info->skb); 370 buffer_info->skb);
366 } else { 371 } else {
367 printk(KERN_INFO "R [0x%03X] %016llX " 372 printk(KERN_INFO "R [0x%03X] %016llX "
368 "%016llX %016llX %016llX %016llX %p", i, 373 "%016llX %016llX %016llX %016llX %p", i,
369 (unsigned long long)le64_to_cpu(u1->a), 374 (unsigned long long)le64_to_cpu(u1->a),
370 (unsigned long long)le64_to_cpu(u1->b), 375 (unsigned long long)le64_to_cpu(u1->b),
371 (unsigned long long)le64_to_cpu(u1->c), 376 (unsigned long long)le64_to_cpu(u1->c),
372 (unsigned long long)le64_to_cpu(u1->d), 377 (unsigned long long)le64_to_cpu(u1->d),
373 (unsigned long long)buffer_info->dma, 378 (unsigned long long)buffer_info->dma,
374 buffer_info->skb); 379 buffer_info->skb);
375 380
376 if (netif_msg_pktdata(adapter)) 381 if (netif_msg_pktdata(adapter))
377 print_hex_dump(KERN_INFO, "", 382 print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
400 * 63 48 47 40 39 32 31 16 15 0 405 * 63 48 47 40 39 32 31 16 15 0
401 */ 406 */
402 printk(KERN_INFO "Rl[desc] [address 63:0 ] " 407 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
403 "[vl er S cks ln] [bi->dma ] [bi->skb] " 408 "[vl er S cks ln] [bi->dma ] [bi->skb] "
404 "<-- Legacy format\n"); 409 "<-- Legacy format\n");
405 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 410 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
406 rx_desc = E1000_RX_DESC(*rx_ring, i); 411 rx_desc = E1000_RX_DESC(*rx_ring, i);
407 buffer_info = &rx_ring->buffer_info[i]; 412 buffer_info = &rx_ring->buffer_info[i];
408 u0 = (struct my_u0 *)rx_desc; 413 u0 = (struct my_u0 *)rx_desc;
409 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " 414 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
410 "%016llX %p", i, 415 "%016llX %p", i,
411 (unsigned long long)le64_to_cpu(u0->a), 416 (unsigned long long)le64_to_cpu(u0->a),
412 (unsigned long long)le64_to_cpu(u0->b), 417 (unsigned long long)le64_to_cpu(u0->b),
413 (unsigned long long)buffer_info->dma, 418 (unsigned long long)buffer_info->dma,
414 buffer_info->skb); 419 buffer_info->skb);
415 if (i == rx_ring->next_to_use) 420 if (i == rx_ring->next_to_use)
416 printk(KERN_CONT " NTU\n"); 421 printk(KERN_CONT " NTU\n");
417 else if (i == rx_ring->next_to_clean) 422 else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:
421 426
422 if (netif_msg_pktdata(adapter)) 427 if (netif_msg_pktdata(adapter))
423 print_hex_dump(KERN_INFO, "", 428 print_hex_dump(KERN_INFO, "",
424 DUMP_PREFIX_ADDRESS, 429 DUMP_PREFIX_ADDRESS,
425 16, 1, phys_to_virt(buffer_info->dma), 430 16, 1,
426 adapter->rx_buffer_len, true); 431 phys_to_virt(buffer_info->dma),
432 adapter->rx_buffer_len, true);
427 } 433 }
428 } 434 }
429 435
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
450 * @skb: pointer to sk_buff to be indicated to stack 456 * @skb: pointer to sk_buff to be indicated to stack
451 **/ 457 **/
452static void e1000_receive_skb(struct e1000_adapter *adapter, 458static void e1000_receive_skb(struct e1000_adapter *adapter,
453 struct net_device *netdev, 459 struct net_device *netdev, struct sk_buff *skb,
454 struct sk_buff *skb,
455 u8 status, __le16 vlan) 460 u8 status, __le16 vlan)
456{ 461{
457 skb->protocol = eth_type_trans(skb, netdev); 462 skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
464} 469}
465 470
466/** 471/**
467 * e1000_rx_checksum - Receive Checksum Offload for 82543 472 * e1000_rx_checksum - Receive Checksum Offload
468 * @adapter: board private structure 473 * @adapter: board private structure
469 * @status_err: receive descriptor status and error fields 474 * @status_err: receive descriptor status and error fields
470 * @csum: receive descriptor csum field 475 * @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
548 adapter->rx_buffer_len, 553 adapter->rx_buffer_len,
549 DMA_FROM_DEVICE); 554 DMA_FROM_DEVICE);
550 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 555 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
551 dev_err(&pdev->dev, "RX DMA map failed\n"); 556 dev_err(&pdev->dev, "Rx DMA map failed\n");
552 adapter->rx_dma_failed++; 557 adapter->rx_dma_failed++;
553 break; 558 break;
554 } 559 }
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
601 ps_page = &buffer_info->ps_pages[j]; 606 ps_page = &buffer_info->ps_pages[j];
602 if (j >= adapter->rx_ps_pages) { 607 if (j >= adapter->rx_ps_pages) {
603 /* all unused desc entries get hw null ptr */ 608 /* all unused desc entries get hw null ptr */
604 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); 609 rx_desc->read.buffer_addr[j + 1] =
610 ~cpu_to_le64(0);
605 continue; 611 continue;
606 } 612 }
607 if (!ps_page->page) { 613 if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
617 if (dma_mapping_error(&pdev->dev, 623 if (dma_mapping_error(&pdev->dev,
618 ps_page->dma)) { 624 ps_page->dma)) {
619 dev_err(&adapter->pdev->dev, 625 dev_err(&adapter->pdev->dev,
620 "RX DMA page map failed\n"); 626 "Rx DMA page map failed\n");
621 adapter->rx_dma_failed++; 627 adapter->rx_dma_failed++;
622 goto no_buffers; 628 goto no_buffers;
623 } 629 }
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
627 * didn't change because each write-back 633 * didn't change because each write-back
628 * erases this info. 634 * erases this info.
629 */ 635 */
630 rx_desc->read.buffer_addr[j+1] = 636 rx_desc->read.buffer_addr[j + 1] =
631 cpu_to_le64(ps_page->dma); 637 cpu_to_le64(ps_page->dma);
632 } 638 }
633 639
634 skb = netdev_alloc_skb_ip_align(netdev, 640 skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
644 adapter->rx_ps_bsize0, 650 adapter->rx_ps_bsize0,
645 DMA_FROM_DEVICE); 651 DMA_FROM_DEVICE);
646 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 652 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
647 dev_err(&pdev->dev, "RX DMA map failed\n"); 653 dev_err(&pdev->dev, "Rx DMA map failed\n");
648 adapter->rx_dma_failed++; 654 adapter->rx_dma_failed++;
649 /* cleanup skb */ 655 /* cleanup skb */
650 dev_kfree_skb_any(skb); 656 dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
662 * such as IA-64). 668 * such as IA-64).
663 */ 669 */
664 wmb(); 670 wmb();
665 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); 671 writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
666 } 672 }
667 673
668 i++; 674 i++;
@@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
1106 cleaned = 1; 1112 cleaned = 1;
1107 cleaned_count++; 1113 cleaned_count++;
1108 dma_unmap_single(&pdev->dev, buffer_info->dma, 1114 dma_unmap_single(&pdev->dev, buffer_info->dma,
1109 adapter->rx_ps_bsize0, 1115 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1110 DMA_FROM_DEVICE);
1111 buffer_info->dma = 0; 1116 buffer_info->dma = 0;
1112 1117
1113 /* see !EOP comment in other rx routine */ 1118 /* see !EOP comment in other Rx routine */
1114 if (!(staterr & E1000_RXD_STAT_EOP)) 1119 if (!(staterr & E1000_RXD_STAT_EOP))
1115 adapter->flags2 |= FLAG2_IS_DISCARDING; 1120 adapter->flags2 |= FLAG2_IS_DISCARDING;
1116 1121
@@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2610} 2615}
2611 2616
2612/** 2617/**
2613 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 2618 * e1000_configure_tx - Configure Transmit Unit after Reset
2614 * @adapter: board private structure 2619 * @adapter: board private structure
2615 * 2620 *
2616 * Configure the Tx unit of the MAC after a reset. 2621 * Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2663 * hthresh = 1 ==> prefetch when one or more available 2668 * hthresh = 1 ==> prefetch when one or more available
2664 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2669 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2665 * BEWARE: this seems to work but should be considered first if 2670 * BEWARE: this seems to work but should be considered first if
2666 * there are tx hangs or other tx related bugs 2671 * there are Tx hangs or other Tx related bugs
2667 */ 2672 */
2668 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2673 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2669 ew32(TXDCTL(0), txdctl); 2674 ew32(TXDCTL(0), txdctl);
@@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2877 if (adapter->rx_ps_pages) { 2882 if (adapter->rx_ps_pages) {
2878 /* this is a 32 byte descriptor */ 2883 /* this is a 32 byte descriptor */
2879 rdlen = rx_ring->count * 2884 rdlen = rx_ring->count *
2880 sizeof(union e1000_rx_desc_packet_split); 2885 sizeof(union e1000_rx_desc_packet_split);
2881 adapter->clean_rx = e1000_clean_rx_irq_ps; 2886 adapter->clean_rx = e1000_clean_rx_irq_ps;
2882 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 2887 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2883 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 2888 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2900 /* 2905 /*
2901 * set the writeback threshold (only takes effect if the RDTR 2906 * set the writeback threshold (only takes effect if the RDTR
2902 * is set). set GRAN=1 and write back up to 0x4 worth, and 2907 * is set). set GRAN=1 and write back up to 0x4 worth, and
2903 * enable prefetching of 0x20 rx descriptors 2908 * enable prefetching of 0x20 Rx descriptors
2904 * granularity = 01 2909 * granularity = 01
2905 * wthresh = 04, 2910 * wthresh = 04,
2906 * hthresh = 04, 2911 * hthresh = 04,
@@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2981 * excessive C-state transition latencies result in 2986 * excessive C-state transition latencies result in
2982 * dropped transactions. 2987 * dropped transactions.
2983 */ 2988 */
2984 pm_qos_update_request( 2989 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
2985 &adapter->netdev->pm_qos_req, 55);
2986 } else { 2990 } else {
2987 pm_qos_update_request( 2991 pm_qos_update_request(&adapter->netdev->pm_qos_req,
2988 &adapter->netdev->pm_qos_req, 2992 PM_QOS_DEFAULT_VALUE);
2989 PM_QOS_DEFAULT_VALUE);
2990 } 2993 }
2991 } 2994 }
2992 2995
@@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3152 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3155 /* lower 16 bits has Rx packet buffer allocation size in KB */
3153 pba &= 0xffff; 3156 pba &= 0xffff;
3154 /* 3157 /*
3155 * the Tx fifo also stores 16 bytes of information about the tx 3158 * the Tx fifo also stores 16 bytes of information about the Tx
3156 * but don't include ethernet FCS because hardware appends it 3159 * but don't include ethernet FCS because hardware appends it
3157 */ 3160 */
3158 min_tx_space = (adapter->max_frame_size + 3161 min_tx_space = (adapter->max_frame_size +
@@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3175 pba -= min_tx_space - tx_space; 3178 pba -= min_tx_space - tx_space;
3176 3179
3177 /* 3180 /*
3178 * if short on Rx space, Rx wins and must trump tx 3181 * if short on Rx space, Rx wins and must trump Tx
3179 * adjustment or use Early Receive if available 3182 * adjustment or use Early Receive if available
3180 */ 3183 */
3181 if ((pba < min_rx_space) && 3184 if ((pba < min_rx_space) &&
@@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
4039 adapter->netdev->name, 4042 adapter->netdev->name,
4040 adapter->link_speed, 4043 adapter->link_speed,
4041 (adapter->link_duplex == FULL_DUPLEX) ? 4044 (adapter->link_duplex == FULL_DUPLEX) ?
4042 "Full Duplex" : "Half Duplex", 4045 "Full Duplex" : "Half Duplex",
4043 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 4046 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
4044 "RX/TX" : 4047 "Rx/Tx" :
4045 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 4048 ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
4046 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 4049 ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
4047} 4050}
4048 4051
4049static bool e1000e_has_link(struct e1000_adapter *adapter) 4052static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4306,7 +4309,6 @@ link_up:
4306 * to get done, so reset controller to flush Tx. 4309 * to get done, so reset controller to flush Tx.
4307 * (Do the reset outside of interrupt context). 4310 * (Do the reset outside of interrupt context).
4308 */ 4311 */
4309 adapter->tx_timeout_count++;
4310 schedule_work(&adapter->reset_task); 4312 schedule_work(&adapter->reset_task);
4311 /* return immediately since reset is imminent */ 4313 /* return immediately since reset is imminent */
4312 return; 4314 return;
@@ -4338,7 +4340,7 @@ link_up:
4338 /* Force detection of hung controller every watchdog period */ 4340 /* Force detection of hung controller every watchdog period */
4339 adapter->detect_tx_hung = 1; 4341 adapter->detect_tx_hung = 1;
4340 4342
4341 /* flush partial descriptors to memory before detecting tx hang */ 4343 /* flush partial descriptors to memory before detecting Tx hang */
4342 if (adapter->flags2 & FLAG2_DMA_BURST) { 4344 if (adapter->flags2 & FLAG2_DMA_BURST) {
4343 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4345 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4344 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4346 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4529,7 +4531,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4529 buffer_info->next_to_watch = i; 4531 buffer_info->next_to_watch = i;
4530 buffer_info->dma = dma_map_single(&pdev->dev, 4532 buffer_info->dma = dma_map_single(&pdev->dev,
4531 skb->data + offset, 4533 skb->data + offset,
4532 size, DMA_TO_DEVICE); 4534 size, DMA_TO_DEVICE);
4533 buffer_info->mapped_as_page = false; 4535 buffer_info->mapped_as_page = false;
4534 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4536 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4535 goto dma_error; 4537 goto dma_error;
@@ -4576,7 +4578,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4576 } 4578 }
4577 } 4579 }
4578 4580
4579 segs = skb_shinfo(skb)->gso_segs ?: 1; 4581 segs = skb_shinfo(skb)->gso_segs ? : 1;
4580 /* multiply data chunks by size of headers */ 4582 /* multiply data chunks by size of headers */
4581 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4583 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4582 4584
@@ -4588,13 +4590,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4588 return count; 4590 return count;
4589 4591
4590dma_error: 4592dma_error:
4591 dev_err(&pdev->dev, "TX DMA map failed\n"); 4593 dev_err(&pdev->dev, "Tx DMA map failed\n");
4592 buffer_info->dma = 0; 4594 buffer_info->dma = 0;
4593 if (count) 4595 if (count)
4594 count--; 4596 count--;
4595 4597
4596 while (count--) { 4598 while (count--) {
4597 if (i==0) 4599 if (i == 0)
4598 i += tx_ring->count; 4600 i += tx_ring->count;
4599 i--; 4601 i--;
4600 buffer_info = &tx_ring->buffer_info[i]; 4602 buffer_info = &tx_ring->buffer_info[i];
@@ -6193,7 +6195,7 @@ static int __init e1000_init_module(void)
6193 int ret; 6195 int ret;
6194 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6196 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6195 e1000e_driver_version); 6197 e1000e_driver_version);
6196 pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); 6198 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
6197 ret = pci_register_driver(&e1000_driver); 6199 ret = pci_register_driver(&e1000_driver);
6198 6200
6199 return ret; 6201 return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0e4bca..4dd9b63273f6 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak,
62 module_param_array_named(X, X, int, &num_##X, 0); \ 62 module_param_array_named(X, X, int, &num_##X, 0); \
63 MODULE_PARM_DESC(X, desc); 63 MODULE_PARM_DESC(X, desc);
64 64
65
66/* 65/*
67 * Transmit Interrupt Delay in units of 1.024 microseconds 66 * Transmit Interrupt Delay in units of 1.024 microseconds
68 * Tx interrupt delay needs to typically be set to something non zero 67 * Tx interrupt delay needs to typically be set to something non-zero
69 * 68 *
70 * Valid Range: 0-65535 69 * Valid Range: 0-65535
71 */ 70 */
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
112#define DEFAULT_ITR 3 111#define DEFAULT_ITR 3
113#define MAX_ITR 100000 112#define MAX_ITR 100000
114#define MIN_ITR 100 113#define MIN_ITR 100
114
115/* IntMode (Interrupt Mode) 115/* IntMode (Interrupt Mode)
116 * 116 *
117 * Valid Range: 0 - 2 117 * Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 00f89e8a9fa0..6bea051b134b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
640 s32 ret_val; 640 s32 ret_val;
641 u16 phy_data; 641 u16 phy_data;
642 642
643 /* Enable CRS on TX. This must be set for half-duplex operation. */ 643 /* Enable CRS on Tx. This must be set for half-duplex operation. */
644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); 644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
645 if (ret_val) 645 if (ret_val)
646 goto out; 646 goto out;
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
812 if (netif_msg_hw(priv)) 812 if (netif_msg_hw(priv))
813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", 813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
814 endptr + 1); 814 endptr + 1);
815 enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); 815 enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
816} 816}
817 817
818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, 818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675016b5..5ed8f9f9419f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev)
434static struct net_device_stats *gfar_get_stats(struct net_device *dev) 434static struct net_device_stats *gfar_get_stats(struct net_device *dev)
435{ 435{
436 struct gfar_private *priv = netdev_priv(dev); 436 struct gfar_private *priv = netdev_priv(dev);
437 struct netdev_queue *txq;
438 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 437 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
439 unsigned long tx_packets = 0, tx_bytes = 0; 438 unsigned long tx_packets = 0, tx_bytes = 0;
440 int i = 0; 439 int i = 0;
@@ -450,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
450 dev->stats.rx_dropped = rx_dropped; 449 dev->stats.rx_dropped = rx_dropped;
451 450
452 for (i = 0; i < priv->num_tx_queues; i++) { 451 for (i = 0; i < priv->num_tx_queues; i++) {
453 txq = netdev_get_tx_queue(dev, i); 452 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
454 tx_bytes += txq->tx_bytes; 453 tx_packets += priv->tx_queue[i]->stats.tx_packets;
455 tx_packets += txq->tx_packets;
456 } 454 }
457 455
458 dev->stats.tx_bytes = tx_bytes; 456 dev->stats.tx_bytes = tx_bytes;
@@ -1922,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
1922 if (err) { 1920 if (err) {
1923 for (j = 0; j < i; j++) 1921 for (j = 0; j < i; j++)
1924 free_grp_irqs(&priv->gfargrp[j]); 1922 free_grp_irqs(&priv->gfargrp[j]);
1925 goto irq_fail; 1923 goto irq_fail;
1926 } 1924 }
1927 } 1925 }
1928 1926
@@ -2109,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2109 } 2107 }
2110 2108
2111 /* Update transmit stats */ 2109 /* Update transmit stats */
2112 txq->tx_bytes += skb->len; 2110 tx_queue->stats.tx_bytes += skb->len;
2113 txq->tx_packets ++; 2111 tx_queue->stats.tx_packets++;
2114 2112
2115 txbdp = txbdp_start = tx_queue->cur_tx; 2113 txbdp = txbdp_start = tx_queue->cur_tx;
2116 lstatus = txbdp->lstatus; 2114 lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
907 MQ_MG_MODE 907 MQ_MG_MODE
908}; 908};
909 909
910/*
911 * Per TX queue stats
912 */
913struct tx_q_stats {
914 unsigned long tx_packets;
915 unsigned long tx_bytes;
916};
917
910/** 918/**
911 * struct gfar_priv_tx_q - per tx queue structure 919 * struct gfar_priv_tx_q - per tx queue structure
912 * @txlock: per queue tx spin lock 920 * @txlock: per queue tx spin lock
913 * @tx_skbuff:skb pointers 921 * @tx_skbuff:skb pointers
914 * @skb_curtx: to be used skb pointer 922 * @skb_curtx: to be used skb pointer
915 * @skb_dirtytx:the last used skb pointer 923 * @skb_dirtytx:the last used skb pointer
924 * @stats: bytes/packets stats
916 * @qindex: index of this queue 925 * @qindex: index of this queue
917 * @dev: back pointer to the dev structure 926 * @dev: back pointer to the dev structure
918 * @grp: back pointer to the group to which this queue belongs 927 * @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
934 struct txbd8 *tx_bd_base; 943 struct txbd8 *tx_bd_base;
935 struct txbd8 *cur_tx; 944 struct txbd8 *cur_tx;
936 struct txbd8 *dirty_tx; 945 struct txbd8 *dirty_tx;
946 struct tx_q_stats stats;
937 struct net_device *dev; 947 struct net_device *dev;
938 struct gfar_priv_grp *grp; 948 struct gfar_priv_grp *grp;
939 u16 skb_curtx; 949 u16 skb_curtx;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. 2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
3 * 3 *
4 * 2005-2009 (c) Aeroflex Gaisler AB 4 * 2005-2010 (c) Aeroflex Gaisler AB
5 * 5 *
6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs 6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
7 * available in the GRLIB VHDL IP core library. 7 * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
356 dev_dbg(&dev->dev, " starting queue\n"); 356 dev_dbg(&dev->dev, " starting queue\n");
357 netif_start_queue(dev); 357 netif_start_queue(dev);
358 358
359 GRETH_REGSAVE(greth->regs->status, 0xFF);
360
359 napi_enable(&greth->napi); 361 napi_enable(&greth->napi);
360 362
361 greth_enable_irqs(greth); 363 greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
371 373
372 napi_disable(&greth->napi); 374 napi_disable(&greth->napi);
373 375
376 greth_disable_irqs(greth);
374 greth_disable_tx(greth); 377 greth_disable_tx(greth);
378 greth_disable_rx(greth);
375 379
376 netif_stop_queue(dev); 380 netif_stop_queue(dev);
377 381
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
388 struct greth_private *greth = netdev_priv(dev); 392 struct greth_private *greth = netdev_priv(dev);
389 struct greth_bd *bdp; 393 struct greth_bd *bdp;
390 int err = NETDEV_TX_OK; 394 int err = NETDEV_TX_OK;
391 u32 status, dma_addr; 395 u32 status, dma_addr, ctrl;
396 unsigned long flags;
392 397
393 bdp = greth->tx_bd_base + greth->tx_next; 398 /* Clean TX Ring */
399 greth_clean_tx(greth->netdev);
394 400
395 if (unlikely(greth->tx_free <= 0)) { 401 if (unlikely(greth->tx_free <= 0)) {
402 spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
403 ctrl = GRETH_REGLOAD(greth->regs->control);
404 /* Enable TX IRQ only if not already in poll() routine */
405 if (ctrl & GRETH_RXI)
406 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
396 netif_stop_queue(dev); 407 netif_stop_queue(dev);
408 spin_unlock_irqrestore(&greth->devlock, flags);
397 return NETDEV_TX_BUSY; 409 return NETDEV_TX_BUSY;
398 } 410 }
399 411
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
406 goto out; 418 goto out;
407 } 419 }
408 420
421 bdp = greth->tx_bd_base + greth->tx_next;
409 dma_addr = greth_read_bd(&bdp->addr); 422 dma_addr = greth_read_bd(&bdp->addr);
410 423
411 memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); 424 memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
412 425
413 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 426 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
414 427
415 status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); 428 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
416 429
417 /* Wrap around descriptor ring */ 430 /* Wrap around descriptor ring */
418 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { 431 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
422 greth->tx_next = NEXT_TX(greth->tx_next); 435 greth->tx_next = NEXT_TX(greth->tx_next);
423 greth->tx_free--; 436 greth->tx_free--;
424 437
425 /* No more descriptors */
426 if (unlikely(greth->tx_free == 0)) {
427
428 /* Free transmitted descriptors */
429 greth_clean_tx(dev);
430
431 /* If nothing was cleaned, stop queue & wait for irq */
432 if (unlikely(greth->tx_free == 0)) {
433 status |= GRETH_BD_IE;
434 netif_stop_queue(dev);
435 }
436 }
437
438 /* Write descriptor control word and enable transmission */ 438 /* Write descriptor control word and enable transmission */
439 greth_write_bd(&bdp->stat, status); 439 greth_write_bd(&bdp->stat, status);
440 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
440 greth_enable_tx(greth); 441 greth_enable_tx(greth);
442 spin_unlock_irqrestore(&greth->devlock, flags);
441 443
442out: 444out:
443 dev_kfree_skb(skb); 445 dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
450{ 452{
451 struct greth_private *greth = netdev_priv(dev); 453 struct greth_private *greth = netdev_priv(dev);
452 struct greth_bd *bdp; 454 struct greth_bd *bdp;
453 u32 status = 0, dma_addr; 455 u32 status = 0, dma_addr, ctrl;
454 int curr_tx, nr_frags, i, err = NETDEV_TX_OK; 456 int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
457 unsigned long flags;
455 458
456 nr_frags = skb_shinfo(skb)->nr_frags; 459 nr_frags = skb_shinfo(skb)->nr_frags;
457 460
461 /* Clean TX Ring */
462 greth_clean_tx_gbit(dev);
463
458 if (greth->tx_free < nr_frags + 1) { 464 if (greth->tx_free < nr_frags + 1) {
465 spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
466 ctrl = GRETH_REGLOAD(greth->regs->control);
467 /* Enable TX IRQ only if not already in poll() routine */
468 if (ctrl & GRETH_RXI)
469 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
459 netif_stop_queue(dev); 470 netif_stop_queue(dev);
471 spin_unlock_irqrestore(&greth->devlock, flags);
460 err = NETDEV_TX_BUSY; 472 err = NETDEV_TX_BUSY;
461 goto out; 473 goto out;
462 } 474 }
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
499 greth->tx_skbuff[curr_tx] = NULL; 511 greth->tx_skbuff[curr_tx] = NULL;
500 bdp = greth->tx_bd_base + curr_tx; 512 bdp = greth->tx_bd_base + curr_tx;
501 513
502 status = GRETH_TXBD_CSALL; 514 status = GRETH_TXBD_CSALL | GRETH_BD_EN;
503 status |= frag->size & GRETH_BD_LEN; 515 status |= frag->size & GRETH_BD_LEN;
504 516
505 /* Wrap around descriptor ring */ 517 /* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
509 /* More fragments left */ 521 /* More fragments left */
510 if (i < nr_frags - 1) 522 if (i < nr_frags - 1)
511 status |= GRETH_TXBD_MORE; 523 status |= GRETH_TXBD_MORE;
512 524 else
513 /* ... last fragment, check if out of descriptors */ 525 status |= GRETH_BD_IE; /* enable IRQ on last fragment */
514 else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
515
516 /* Enable interrupts and stop queue */
517 status |= GRETH_BD_IE;
518 netif_stop_queue(dev);
519 }
520 526
521 greth_write_bd(&bdp->stat, status); 527 greth_write_bd(&bdp->stat, status);
522 528
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
536 542
537 wmb(); 543 wmb();
538 544
539 /* Enable the descriptors that we configured ... */ 545 /* Enable the descriptor chain by enabling the first descriptor */
540 for (i = 0; i < nr_frags + 1; i++) { 546 bdp = greth->tx_bd_base + greth->tx_next;
541 bdp = greth->tx_bd_base + greth->tx_next; 547 greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
542 greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); 548 greth->tx_next = curr_tx;
543 greth->tx_next = NEXT_TX(greth->tx_next); 549 greth->tx_free -= nr_frags + 1;
544 greth->tx_free--;
545 }
546 550
551 wmb();
552
553 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
547 greth_enable_tx(greth); 554 greth_enable_tx(greth);
555 spin_unlock_irqrestore(&greth->devlock, flags);
548 556
549 return NETDEV_TX_OK; 557 return NETDEV_TX_OK;
550 558
551frag_map_error: 559frag_map_error:
552 /* Unmap SKB mappings that succeeded */ 560 /* Unmap SKB mappings that succeeded and disable descriptor */
553 for (i = 0; greth->tx_next + i != curr_tx; i++) { 561 for (i = 0; greth->tx_next + i != curr_tx; i++) {
554 bdp = greth->tx_bd_base + greth->tx_next + i; 562 bdp = greth->tx_bd_base + greth->tx_next + i;
555 dma_unmap_single(greth->dev, 563 dma_unmap_single(greth->dev,
556 greth_read_bd(&bdp->addr), 564 greth_read_bd(&bdp->addr),
557 greth_read_bd(&bdp->stat) & GRETH_BD_LEN, 565 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
558 DMA_TO_DEVICE); 566 DMA_TO_DEVICE);
567 greth_write_bd(&bdp->stat, 0);
559 } 568 }
560map_error: 569map_error:
561 if (net_ratelimit()) 570 if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
565 return err; 574 return err;
566} 575}
567 576
568
569static irqreturn_t greth_interrupt(int irq, void *dev_id) 577static irqreturn_t greth_interrupt(int irq, void *dev_id)
570{ 578{
571 struct net_device *dev = dev_id; 579 struct net_device *dev = dev_id;
572 struct greth_private *greth; 580 struct greth_private *greth;
573 u32 status; 581 u32 status, ctrl;
574 irqreturn_t retval = IRQ_NONE; 582 irqreturn_t retval = IRQ_NONE;
575 583
576 greth = netdev_priv(dev); 584 greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
580 /* Get the interrupt events that caused us to be here. */ 588 /* Get the interrupt events that caused us to be here. */
581 status = GRETH_REGLOAD(greth->regs->status); 589 status = GRETH_REGLOAD(greth->regs->status);
582 590
583 /* Handle rx and tx interrupts through poll */ 591 /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
584 if (status & (GRETH_INT_RX | GRETH_INT_TX)) { 592 * set regardless of whether IRQ is enabled or not. Especially
585 593 * important when shared IRQ.
586 /* Clear interrupt status */ 594 */
587 GRETH_REGORIN(greth->regs->status, 595 ctrl = GRETH_REGLOAD(greth->regs->control);
588 status & (GRETH_INT_RX | GRETH_INT_TX));
589 596
597 /* Handle rx and tx interrupts through poll */
598 if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
599 ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
590 retval = IRQ_HANDLED; 600 retval = IRQ_HANDLED;
591 601
592 /* Disable interrupts and schedule poll() */ 602 /* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
610 620
611 while (1) { 621 while (1) {
612 bdp = greth->tx_bd_base + greth->tx_last; 622 bdp = greth->tx_bd_base + greth->tx_last;
623 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
624 mb();
613 stat = greth_read_bd(&bdp->stat); 625 stat = greth_read_bd(&bdp->stat);
614 626
615 if (unlikely(stat & GRETH_BD_EN)) 627 if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
670 682
671 /* We only clean fully completed SKBs */ 683 /* We only clean fully completed SKBs */
672 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); 684 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
673 stat = bdp_last_frag->stat; 685
686 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
687 mb();
688 stat = greth_read_bd(&bdp_last_frag->stat);
674 689
675 if (stat & GRETH_BD_EN) 690 if (stat & GRETH_BD_EN)
676 break; 691 break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
702 greth->tx_free += nr_frags+1; 717 greth->tx_free += nr_frags+1;
703 dev_kfree_skb(skb); 718 dev_kfree_skb(skb);
704 } 719 }
705 if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
706 netif_wake_queue(dev);
707 }
708}
709 720
710static int greth_pending_packets(struct greth_private *greth) 721 if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
711{ 722 netif_wake_queue(dev);
712 struct greth_bd *bdp;
713 u32 status;
714 bdp = greth->rx_bd_base + greth->rx_cur;
715 status = greth_read_bd(&bdp->stat);
716 if (status & GRETH_BD_EN)
717 return 0;
718 else
719 return 1;
720} 723}
721 724
722static int greth_rx(struct net_device *dev, int limit) 725static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
727 int pkt_len; 730 int pkt_len;
728 int bad, count; 731 int bad, count;
729 u32 status, dma_addr; 732 u32 status, dma_addr;
733 unsigned long flags;
730 734
731 greth = netdev_priv(dev); 735 greth = netdev_priv(dev);
732 736
733 for (count = 0; count < limit; ++count) { 737 for (count = 0; count < limit; ++count) {
734 738
735 bdp = greth->rx_bd_base + greth->rx_cur; 739 bdp = greth->rx_bd_base + greth->rx_cur;
740 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
741 mb();
736 status = greth_read_bd(&bdp->stat); 742 status = greth_read_bd(&bdp->stat);
737 dma_addr = greth_read_bd(&bdp->addr);
738 bad = 0;
739 743
740 if (unlikely(status & GRETH_BD_EN)) { 744 if (unlikely(status & GRETH_BD_EN)) {
741 break; 745 break;
742 } 746 }
743 747
748 dma_addr = greth_read_bd(&bdp->addr);
749 bad = 0;
750
744 /* Check status for errors. */ 751 /* Check status for errors. */
745 if (unlikely(status & GRETH_RXBD_STATUS)) { 752 if (unlikely(status & GRETH_RXBD_STATUS)) {
746 if (status & GRETH_RXBD_ERR_FT) { 753 if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
802 809
803 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); 810 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
804 811
812 spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
805 greth_enable_rx(greth); 813 greth_enable_rx(greth);
814 spin_unlock_irqrestore(&greth->devlock, flags);
806 815
807 greth->rx_cur = NEXT_RX(greth->rx_cur); 816 greth->rx_cur = NEXT_RX(greth->rx_cur);
808 } 817 }
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
836 int pkt_len; 845 int pkt_len;
837 int bad, count = 0; 846 int bad, count = 0;
838 u32 status, dma_addr; 847 u32 status, dma_addr;
848 unsigned long flags;
839 849
840 greth = netdev_priv(dev); 850 greth = netdev_priv(dev);
841 851
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
843 853
844 bdp = greth->rx_bd_base + greth->rx_cur; 854 bdp = greth->rx_bd_base + greth->rx_cur;
845 skb = greth->rx_skbuff[greth->rx_cur]; 855 skb = greth->rx_skbuff[greth->rx_cur];
856 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
857 mb();
846 status = greth_read_bd(&bdp->stat); 858 status = greth_read_bd(&bdp->stat);
847 bad = 0; 859 bad = 0;
848 860
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
865 } 877 }
866 } 878 }
867 879
868 /* Allocate new skb to replace current */ 880 /* Allocate new skb to replace current, not needed if the
869 newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); 881 * current skb can be reused */
870 882 if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
871 if (!bad && newskb) {
872 skb_reserve(newskb, NET_IP_ALIGN); 883 skb_reserve(newskb, NET_IP_ALIGN);
873 884
874 dma_addr = dma_map_single(greth->dev, 885 dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
905 if (net_ratelimit()) 916 if (net_ratelimit())
906 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); 917 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
907 dev_kfree_skb(newskb); 918 dev_kfree_skb(newskb);
919 /* reusing current skb, so it is a drop */
908 dev->stats.rx_dropped++; 920 dev->stats.rx_dropped++;
909 } 921 }
922 } else if (bad) {
923 /* Bad Frame transfer, the skb is reused */
924 dev->stats.rx_dropped++;
910 } else { 925 } else {
926 /* Failed Allocating a new skb. This is rather stupid
927 * but the current "filled" skb is reused, as if
928 * transfer failure. One could argue that RX descriptor
929 * table handling should be divided into cleaning and
930 * filling as the TX part of the driver
931 */
911 if (net_ratelimit()) 932 if (net_ratelimit())
912 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); 933 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
934 /* reusing current skb, so it is a drop */
913 dev->stats.rx_dropped++; 935 dev->stats.rx_dropped++;
914 } 936 }
915 937
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
920 942
921 wmb(); 943 wmb();
922 greth_write_bd(&bdp->stat, status); 944 greth_write_bd(&bdp->stat, status);
945 spin_lock_irqsave(&greth->devlock, flags);
923 greth_enable_rx(greth); 946 greth_enable_rx(greth);
947 spin_unlock_irqrestore(&greth->devlock, flags);
924 greth->rx_cur = NEXT_RX(greth->rx_cur); 948 greth->rx_cur = NEXT_RX(greth->rx_cur);
925 } 949 }
926 950
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
932{ 956{
933 struct greth_private *greth; 957 struct greth_private *greth;
934 int work_done = 0; 958 int work_done = 0;
959 unsigned long flags;
960 u32 mask, ctrl;
935 greth = container_of(napi, struct greth_private, napi); 961 greth = container_of(napi, struct greth_private, napi);
936 962
937 if (greth->gbit_mac) { 963restart_txrx_poll:
938 greth_clean_tx_gbit(greth->netdev); 964 if (netif_queue_stopped(greth->netdev)) {
939 } else { 965 if (greth->gbit_mac)
940 greth_clean_tx(greth->netdev); 966 greth_clean_tx_gbit(greth->netdev);
967 else
968 greth_clean_tx(greth->netdev);
941 } 969 }
942 970
943restart_poll:
944 if (greth->gbit_mac) { 971 if (greth->gbit_mac) {
945 work_done += greth_rx_gbit(greth->netdev, budget - work_done); 972 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
946 } else { 973 } else {
@@ -949,15 +976,29 @@ restart_poll:
949 976
950 if (work_done < budget) { 977 if (work_done < budget) {
951 978
952 napi_complete(napi); 979 spin_lock_irqsave(&greth->devlock, flags);
980
981 ctrl = GRETH_REGLOAD(greth->regs->control);
982 if (netif_queue_stopped(greth->netdev)) {
983 GRETH_REGSAVE(greth->regs->control,
984 ctrl | GRETH_TXI | GRETH_RXI);
985 mask = GRETH_INT_RX | GRETH_INT_RE |
986 GRETH_INT_TX | GRETH_INT_TE;
987 } else {
988 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
989 mask = GRETH_INT_RX | GRETH_INT_RE;
990 }
953 991
954 if (greth_pending_packets(greth)) { 992 if (GRETH_REGLOAD(greth->regs->status) & mask) {
955 napi_reschedule(napi); 993 GRETH_REGSAVE(greth->regs->control, ctrl);
956 goto restart_poll; 994 spin_unlock_irqrestore(&greth->devlock, flags);
995 goto restart_txrx_poll;
996 } else {
997 __napi_complete(napi);
998 spin_unlock_irqrestore(&greth->devlock, flags);
957 } 999 }
958 } 1000 }
959 1001
960 greth_enable_irqs(greth);
961 return work_done; 1002 return work_done;
962} 1003}
963 1004
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
1152}; 1193};
1153 1194
1154static struct net_device_ops greth_netdev_ops = { 1195static struct net_device_ops greth_netdev_ops = {
1155 .ndo_open = greth_open, 1196 .ndo_open = greth_open,
1156 .ndo_stop = greth_close, 1197 .ndo_stop = greth_close,
1157 .ndo_start_xmit = greth_start_xmit, 1198 .ndo_start_xmit = greth_start_xmit,
1158 .ndo_set_mac_address = greth_set_mac_add, 1199 .ndo_set_mac_address = greth_set_mac_add,
1159 .ndo_validate_addr = eth_validate_addr, 1200 .ndo_validate_addr = eth_validate_addr,
1160}; 1201};
1161 1202
1162static inline int wait_for_mdio(struct greth_private *greth) 1203static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
1217 struct greth_private *greth = netdev_priv(dev); 1258 struct greth_private *greth = netdev_priv(dev);
1218 struct phy_device *phydev = greth->phy; 1259 struct phy_device *phydev = greth->phy;
1219 unsigned long flags; 1260 unsigned long flags;
1220
1221 int status_change = 0; 1261 int status_change = 0;
1262 u32 ctrl;
1222 1263
1223 spin_lock_irqsave(&greth->devlock, flags); 1264 spin_lock_irqsave(&greth->devlock, flags);
1224 1265
1225 if (phydev->link) { 1266 if (phydev->link) {
1226 1267
1227 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { 1268 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
1228 1269 ctrl = GRETH_REGLOAD(greth->regs->control) &
1229 GRETH_REGANDIN(greth->regs->control, 1270 ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
1230 ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
1231 1271
1232 if (phydev->duplex) 1272 if (phydev->duplex)
1233 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); 1273 ctrl |= GRETH_CTRL_FD;
1234
1235 if (phydev->speed == SPEED_100) {
1236
1237 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
1238 }
1239 1274
1275 if (phydev->speed == SPEED_100)
1276 ctrl |= GRETH_CTRL_SP;
1240 else if (phydev->speed == SPEED_1000) 1277 else if (phydev->speed == SPEED_1000)
1241 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); 1278 ctrl |= GRETH_CTRL_GB;
1242 1279
1280 GRETH_REGSAVE(greth->regs->control, ctrl);
1243 greth->speed = phydev->speed; 1281 greth->speed = phydev->speed;
1244 greth->duplex = phydev->duplex; 1282 greth->duplex = phydev->duplex;
1245 status_change = 1; 1283 status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
1600 { 1638 {
1601 .name = "GAISLER_ETHMAC", 1639 .name = "GAISLER_ETHMAC",
1602 }, 1640 },
1641 {
1642 .name = "01_01d",
1643 },
1603 {}, 1644 {},
1604}; 1645};
1605 1646
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903cd676..be0f2062bd14 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
23#define GRETH_BD_LEN 0x7FF 23#define GRETH_BD_LEN 0x7FF
24 24
25#define GRETH_TXEN 0x1 25#define GRETH_TXEN 0x1
26#define GRETH_INT_TE 0x2
26#define GRETH_INT_TX 0x8 27#define GRETH_INT_TX 0x8
27#define GRETH_TXI 0x4 28#define GRETH_TXI 0x4
28#define GRETH_TXBD_STATUS 0x0001C000 29#define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
35#define GRETH_TXBD_ERR_UE 0x4000 36#define GRETH_TXBD_ERR_UE 0x4000
36#define GRETH_TXBD_ERR_AL 0x8000 37#define GRETH_TXBD_ERR_AL 0x8000
37 38
39#define GRETH_INT_RE 0x1
38#define GRETH_INT_RX 0x4 40#define GRETH_INT_RX 0x4
39#define GRETH_RXEN 0x2 41#define GRETH_RXEN 0x2
40#define GRETH_RXI 0x8 42#define GRETH_RXI 0x8
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
635 635
636 ret = sh_irda_set_baudrate(self, speed); 636 ret = sh_irda_set_baudrate(self, speed);
637 if (ret < 0) 637 if (ret < 0)
638 return ret; 638 goto sh_irda_hard_xmit_end;
639 639
640 self->tx_buff.len = 0; 640 self->tx_buff.len = 0;
641 if (skb->len) { 641 if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
652 652
653 sh_irda_write(self, IRTFLR, self->tx_buff.len); 653 sh_irda_write(self, IRTFLR, self->tx_buff.len);
654 sh_irda_write(self, IRTCTR, ARMOD | TE); 654 sh_irda_write(self, IRTCTR, ARMOD | TE);
655 } 655 } else
656 goto sh_irda_hard_xmit_end;
656 657
657 dev_kfree_skb(skb); 658 dev_kfree_skb(skb);
658 659
659 return 0; 660 return 0;
661
662sh_irda_hard_xmit_end:
663 sh_irda_set_baudrate(self, 9600);
664 netif_wake_queue(self->ndev);
665 sh_irda_rcv_ctrl(self, 1);
666 dev_kfree_skb(skb);
667
668 return ret;
669
660} 670}
661 671
662static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) 672static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1371 1371
1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1373
1374 /* clear VMDq pool/queue selection for RAR 0 */
1375 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1373 } 1376 }
1374 hw->addr_ctrl.overflow_promisc = 0; 1377 hw->addr_ctrl.overflow_promisc = 0;
1375 1378
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..8753980668c7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -165,7 +165,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
165 unsigned int thisoff = 0; 165 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 166 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 167 u32 fcbuff, fcdmarw, fcfltrw;
168 dma_addr_t addr; 168 dma_addr_t addr = 0;
169 169
170 if (!netdev || !sgl) 170 if (!netdev || !sgl)
171 return 0; 171 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610a42db..fbae703b46d7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "3.0.12-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3176 u32 mhadd, hlreg0; 3176 u32 mhadd, hlreg0;
3177 3177
3178 /* Decide whether to use packet split mode or not */ 3178 /* Decide whether to use packet split mode or not */
3179 /* On by default */
3180 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3181
3179 /* Do not use packet split if we're in SR-IOV Mode */ 3182 /* Do not use packet split if we're in SR-IOV Mode */
3180 if (!adapter->num_vfs) 3183 if (adapter->num_vfs)
3181 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3184 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3185
3186 /* Disable packet split due to 82599 erratum #45 */
3187 if (hw->mac.type == ixgbe_mac_82599EB)
3188 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3182 3189
3183 /* Set the RX buffer length according to the mode */ 3190 /* Set the RX buffer length according to the mode */
3184 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -4863,16 +4870,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4863{ 4870{
4864 int q_idx, num_q_vectors; 4871 int q_idx, num_q_vectors;
4865 struct ixgbe_q_vector *q_vector; 4872 struct ixgbe_q_vector *q_vector;
4866 int napi_vectors;
4867 int (*poll)(struct napi_struct *, int); 4873 int (*poll)(struct napi_struct *, int);
4868 4874
4869 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4875 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4870 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4876 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4871 napi_vectors = adapter->num_rx_queues;
4872 poll = &ixgbe_clean_rxtx_many; 4877 poll = &ixgbe_clean_rxtx_many;
4873 } else { 4878 } else {
4874 num_q_vectors = 1; 4879 num_q_vectors = 1;
4875 napi_vectors = 1;
4876 poll = &ixgbe_poll; 4880 poll = &ixgbe_poll;
4877 } 4881 }
4878 4882
@@ -6667,8 +6671,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6667 struct ixgbe_adapter *adapter, 6671 struct ixgbe_adapter *adapter,
6668 struct ixgbe_ring *tx_ring) 6672 struct ixgbe_ring *tx_ring)
6669{ 6673{
6670 struct net_device *netdev = tx_ring->netdev;
6671 struct netdev_queue *txq;
6672 unsigned int first; 6674 unsigned int first;
6673 unsigned int tx_flags = 0; 6675 unsigned int tx_flags = 0;
6674 u8 hdr_len = 0; 6676 u8 hdr_len = 0;
@@ -6765,9 +6767,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6765 /* add the ATR filter if ATR is on */ 6767 /* add the ATR filter if ATR is on */
6766 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 6768 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6767 ixgbe_atr(tx_ring, skb, tx_flags, protocol); 6769 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6768 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6769 txq->tx_bytes += skb->len;
6770 txq->tx_packets++;
6771 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); 6770 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6772 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); 6771 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6773 6772
@@ -6925,8 +6924,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6925 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6924 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6926 int i; 6925 int i;
6927 6926
6928 /* accurate rx/tx bytes/packets stats */
6929 dev_txq_stats_fold(netdev, stats);
6930 rcu_read_lock(); 6927 rcu_read_lock();
6931 for (i = 0; i < adapter->num_rx_queues; i++) { 6928 for (i = 0; i < adapter->num_rx_queues; i++) {
6932 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); 6929 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6940,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6943 stats->rx_bytes += bytes; 6940 stats->rx_bytes += bytes;
6944 } 6941 }
6945 } 6942 }
6943
6944 for (i = 0; i < adapter->num_tx_queues; i++) {
6945 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6946 u64 bytes, packets;
6947 unsigned int start;
6948
6949 if (ring) {
6950 do {
6951 start = u64_stats_fetch_begin_bh(&ring->syncp);
6952 packets = ring->stats.packets;
6953 bytes = ring->stats.bytes;
6954 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6955 stats->tx_packets += packets;
6956 stats->tx_bytes += bytes;
6957 }
6958 }
6946 rcu_read_unlock(); 6959 rcu_read_unlock();
6947 /* following stats updated by ixgbe_watchdog_task() */ 6960 /* following stats updated by ixgbe_watchdog_task() */
6948 stats->multicast = netdev->stats.multicast; 6961 stats->multicast = netdev->stats.multicast;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113
114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
115{ 114{
116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
117 vmolr |= (IXGBE_VMOLR_ROMPE | 116 vmolr |= (IXGBE_VMOLR_ROMPE |
118 IXGBE_VMOLR_ROPE |
119 IXGBE_VMOLR_BAM); 117 IXGBE_VMOLR_BAM);
120 if (aupe) 118 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE; 119 vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
133 } 133 }
134 134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw); 137 IXGBE_WRITE_FLUSH(hw);
138 138
139 /* Poll for reset bit to self-clear indicating reset is complete */ 139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) { 140 for (i = 0; i < 10; i++) {
141 udelay(1); 141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST)) 143 if (!(ctrl & reset_bit))
144 break; 144 break;
145 } 145 }
146 if (ctrl & IXGBE_CTRL_RST) { 146 if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED; 147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 149 }
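
The X540 hunk above parameterizes which reset bit gets written into IXGBE_CTRL and then polls, with udelay(1) between reads, until the hardware clears it, reporting IXGBE_ERR_RESET_FAILED after ten attempts. A generic bounded-poll helper in plain C captures the same shape (the simulated register and the names are invented; the driver reads the real register over MMIO):

#include <stdint.h>
#include <stdio.h>

/* Poll *reg until bit self-clears, or give up after max_tries reads.
 * Returns 0 on success, -1 on timeout. */
static int poll_bit_clear(volatile uint32_t *reg, uint32_t bit,
                          unsigned int max_tries)
{
        unsigned int i;

        for (i = 0; i < max_tries; i++) {
                if (!(*reg & bit))
                        return 0;
                /* the driver waits ~1us between reads (udelay(1)) */
        }
        return -1;
}

int main(void)
{
        volatile uint32_t ctrl = 0x04000000;    /* pretend the reset bit is set */

        if (poll_bit_clear(&ctrl, 0x04000000, 10))
                fprintf(stderr, "Reset polling failed to complete.\n");

        ctrl &= ~0x04000000;                    /* "hardware" finishes the reset */
        if (!poll_bit_clear(&ctrl, 0x04000000, 10))
                printf("reset complete\n");
        return 0;
}
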
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845affea13..5933621ac3ff 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
585 rcu_read_lock_bh(); 585 rcu_read_lock_bh();
586 vlan = rcu_dereference(q->vlan); 586 vlan = rcu_dereference(q->vlan);
587 if (vlan) 587 if (vlan)
588 netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; 588 vlan->dev->stats.tx_dropped++;
589 rcu_read_unlock_bh(); 589 rcu_read_unlock_bh();
590 590
591 return err; 591 return err;
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
113void mlx4_start_catas_poll(struct mlx4_dev *dev) 113void mlx4_start_catas_poll(struct mlx4_dev *dev)
114{ 114{
115 struct mlx4_priv *priv = mlx4_priv(dev); 115 struct mlx4_priv *priv = mlx4_priv(dev);
116 unsigned long addr; 116 phys_addr_t addr;
117 117
118 INIT_LIST_HEAD(&priv->catas_err.list); 118 INIT_LIST_HEAD(&priv->catas_err.list);
119 init_timer(&priv->catas_err.timer); 119 init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
124 124
125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); 125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
126 if (!priv->catas_err.map) { 126 if (!priv->catas_err.map) {
127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", 127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
128 addr); 128 (unsigned long long) addr);
129 return; 129 return;
130 } 130 }
131 131
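
The catas.c fix widens addr to phys_addr_t, which can be 64-bit even on 32-bit systems, so the warning now prints it with %llx after casting to unsigned long long instead of using %lx. The same portable-printing idiom in plain C, assuming a 64-bit address type:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;   /* stand-in for the kernel type */

int main(void)
{
        phys_addr_t addr = 0x1ffff0000ULL;      /* deliberately wider than 32 bits */

        /* %lx would truncate (or warn) where unsigned long is 32-bit;
         * casting to unsigned long long and using %llx is always safe */
        printf("internal error buffer at 0x%llx\n", (unsigned long long)addr);
        return 0;
}
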
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40cd876..1ff6ca6466ed 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
202 if (mlx4_uar_alloc(dev, &mdev->priv_uar)) 202 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
203 goto err_pd; 203 goto err_pd;
204 204
205 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 205 mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
206 PAGE_SIZE);
206 if (!mdev->uar_map) 207 if (!mdev->uar_map)
207 goto err_uar; 208 goto err_uar;
208 spin_lock_init(&mdev->uar_lock); 209 spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d8fa71..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
829 goto err_uar_table_free; 829 goto err_uar_table_free;
830 } 830 }
831 831
832 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 832 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
833 if (!priv->kar) { 833 if (!priv->kar) {
834 mlx4_err(dev, "Couldn't map kernel access region, " 834 mlx4_err(dev, "Couldn't map kernel access region, "
835 "aborting.\n"); 835 "aborting.\n");
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1289 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1290 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1291 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1292 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1293 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1294 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1295 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1296 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1297 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1298 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1299 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1300 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1301 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1302 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1303 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1289 { 0, } 1304 { 0, }
1290}; 1305};
1291 1306
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7ef7b6..79cf42db2ea9 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
95 * entry in hash chain and *mgm holds end of hash chain. 95 * entry in hash chain and *mgm holds end of hash chain.
96 */ 96 */
97static int find_mgm(struct mlx4_dev *dev, 97static int find_mgm(struct mlx4_dev *dev,
98 u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox, 98 u8 *gid, enum mlx4_protocol protocol,
99 struct mlx4_cmd_mailbox *mgm_mailbox,
99 u16 *hash, int *prev, int *index) 100 u16 *hash, int *prev, int *index)
100{ 101{
101 struct mlx4_cmd_mailbox *mailbox; 102 struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev,
134 return err; 135 return err;
135 } 136 }
136 137
137 if (!memcmp(mgm->gid, gid, 16)) 138 if (!memcmp(mgm->gid, gid, 16) &&
139 be32_to_cpu(mgm->members_count) >> 30 == protocol)
138 return err; 140 return err;
139 141
140 *prev = *index; 142 *prev = *index;
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev,
146} 148}
147 149
148int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 150int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
149 int block_mcast_loopback) 151 int block_mcast_loopback, enum mlx4_protocol protocol)
150{ 152{
151 struct mlx4_priv *priv = mlx4_priv(dev); 153 struct mlx4_priv *priv = mlx4_priv(dev);
152 struct mlx4_cmd_mailbox *mailbox; 154 struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
165 167
166 mutex_lock(&priv->mcg_table.mutex); 168 mutex_lock(&priv->mcg_table.mutex);
167 169
168 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 170 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
169 if (err) 171 if (err)
170 goto out; 172 goto out;
171 173
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
187 memcpy(mgm->gid, gid, 16); 189 memcpy(mgm->gid, gid, 16);
188 } 190 }
189 191
190 members_count = be32_to_cpu(mgm->members_count); 192 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
191 if (members_count == MLX4_QP_PER_MGM) { 193 if (members_count == MLX4_QP_PER_MGM) {
192 mlx4_err(dev, "MGM at index %x is full.\n", index); 194 mlx4_err(dev, "MGM at index %x is full.\n", index);
193 err = -ENOMEM; 195 err = -ENOMEM;
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
207 else 209 else
208 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
209 211
210 mgm->members_count = cpu_to_be32(members_count); 212 mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
211 213
212 err = mlx4_WRITE_MCG(dev, index, mailbox); 214 err = mlx4_WRITE_MCG(dev, index, mailbox);
213 if (err) 215 if (err)
@@ -242,7 +244,8 @@ out:
242} 244}
243EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 245EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
244 246
245int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) 247int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
248 enum mlx4_protocol protocol)
246{ 249{
247 struct mlx4_priv *priv = mlx4_priv(dev); 250 struct mlx4_priv *priv = mlx4_priv(dev);
248 struct mlx4_cmd_mailbox *mailbox; 251 struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
260 263
261 mutex_lock(&priv->mcg_table.mutex); 264 mutex_lock(&priv->mcg_table.mutex);
262 265
263 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 266 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
264 if (err) 267 if (err)
265 goto out; 268 goto out;
266 269
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
270 goto out; 273 goto out;
271 } 274 }
272 275
273 members_count = be32_to_cpu(mgm->members_count); 276 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
274 for (loc = -1, i = 0; i < members_count; ++i) 277 for (loc = -1, i = 0; i < members_count; ++i)
275 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 278 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
276 loc = i; 279 loc = i;
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
282 } 285 }
283 286
284 287
285 mgm->members_count = cpu_to_be32(--members_count); 288 mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
286 mgm->qp[loc] = mgm->qp[i - 1]; 289 mgm->qp[loc] = mgm->qp[i - 1];
287 mgm->qp[i - 1] = 0; 290 mgm->qp[i - 1] = 0;
288 291
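
The mcg.c hunks let different protocols share the MGM table by storing the protocol in the top two bits of members_count: writers OR in protocol << 30, and readers mask with 0xffffff to recover the member count. A minimal pack/unpack sketch of that encoding (the helper names are invented here, and the real field is big-endian on the wire):

#include <stdint.h>
#include <stdio.h>

#define MEMBERS_COUNT_MASK      0x00ffffffu
#define PROTOCOL_SHIFT          30

static uint32_t mgm_pack(uint32_t members_count, uint32_t protocol)
{
        return (members_count & MEMBERS_COUNT_MASK) |
               (protocol << PROTOCOL_SHIFT);
}

static uint32_t mgm_count(uint32_t field)
{
        return field & MEMBERS_COUNT_MASK;
}

static uint32_t mgm_protocol(uint32_t field)
{
        return field >> PROTOCOL_SHIFT;
}

int main(void)
{
        uint32_t field = mgm_pack(5, 1);        /* 5 members, protocol 1 */

        printf("count=%u protocol=%u\n", mgm_count(field), mgm_protocol(field));
        return 0;
}
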
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a37fcf11ab36..ea5cfe2c3a04 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
3403 return -EIO; 3403 return -EIO;
3404 } 3404 }
3405 3405
3406 status = pci_restore_state(pdev); 3406 pci_restore_state(pdev);
3407 if (status)
3408 return status;
3409 3407
3410 status = pci_enable_device(pdev); 3408 status = pci_enable_device(pdev);
3411 if (status) { 3409 if (status) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad82..9fb59d3f9c92 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
4489{ 4489{
4490 struct niu_parent *parent = np->parent; 4490 struct niu_parent *parent = np->parent;
4491 int first_rx_channel, first_tx_channel; 4491 int first_rx_channel, first_tx_channel;
4492 int num_rx_rings, num_tx_rings;
4493 struct rx_ring_info *rx_rings;
4494 struct tx_ring_info *tx_rings;
4492 int i, port, err; 4495 int i, port, err;
4493 4496
4494 port = np->port; 4497 port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
4498 first_tx_channel += parent->txchan_per_port[i]; 4501 first_tx_channel += parent->txchan_per_port[i];
4499 } 4502 }
4500 4503
4501 np->num_rx_rings = parent->rxchan_per_port[port]; 4504 num_rx_rings = parent->rxchan_per_port[port];
4502 np->num_tx_rings = parent->txchan_per_port[port]; 4505 num_tx_rings = parent->txchan_per_port[port];
4503 4506
4504 netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); 4507 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4505 netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); 4508 GFP_KERNEL);
4506
4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL);
4509 err = -ENOMEM; 4509 err = -ENOMEM;
4510 if (!np->rx_rings) 4510 if (!rx_rings)
4511 goto out_err; 4511 goto out_err;
4512 4512
4513 np->num_rx_rings = num_rx_rings;
4514 smp_wmb();
4515 np->rx_rings = rx_rings;
4516
4517 netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4518
4513 for (i = 0; i < np->num_rx_rings; i++) { 4519 for (i = 0; i < np->num_rx_rings; i++) {
4514 struct rx_ring_info *rp = &np->rx_rings[i]; 4520 struct rx_ring_info *rp = &np->rx_rings[i];
4515 4521
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4544 return err;
4539 } 4545 }
4540 4546
4541 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), 4547 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4548 GFP_KERNEL);
4543 err = -ENOMEM; 4549 err = -ENOMEM;
4544 if (!np->tx_rings) 4550 if (!tx_rings)
4545 goto out_err; 4551 goto out_err;
4546 4552
4553 np->num_tx_rings = num_tx_rings;
4554 smp_wmb();
4555 np->tx_rings = tx_rings;
4556
4557 netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4558
4547 for (i = 0; i < np->num_tx_rings; i++) { 4559 for (i = 0; i < np->num_tx_rings; i++) {
4548 struct tx_ring_info *rp = &np->tx_rings[i]; 4560 struct tx_ring_info *rp = &np->tx_rings[i];
4549 4561
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
6246static void niu_get_rx_stats(struct niu *np) 6258static void niu_get_rx_stats(struct niu *np)
6247{ 6259{
6248 unsigned long pkts, dropped, errors, bytes; 6260 unsigned long pkts, dropped, errors, bytes;
6261 struct rx_ring_info *rx_rings;
6249 int i; 6262 int i;
6250 6263
6251 pkts = dropped = errors = bytes = 0; 6264 pkts = dropped = errors = bytes = 0;
6265
6266 rx_rings = ACCESS_ONCE(np->rx_rings);
6267 if (!rx_rings)
6268 goto no_rings;
6269
6252 for (i = 0; i < np->num_rx_rings; i++) { 6270 for (i = 0; i < np->num_rx_rings; i++) {
6253 struct rx_ring_info *rp = &np->rx_rings[i]; 6271 struct rx_ring_info *rp = &rx_rings[i];
6254 6272
6255 niu_sync_rx_discard_stats(np, rp, 0); 6273 niu_sync_rx_discard_stats(np, rp, 0);
6256 6274
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
6259 dropped += rp->rx_dropped; 6277 dropped += rp->rx_dropped;
6260 errors += rp->rx_errors; 6278 errors += rp->rx_errors;
6261 } 6279 }
6280
6281no_rings:
6262 np->dev->stats.rx_packets = pkts; 6282 np->dev->stats.rx_packets = pkts;
6263 np->dev->stats.rx_bytes = bytes; 6283 np->dev->stats.rx_bytes = bytes;
6264 np->dev->stats.rx_dropped = dropped; 6284 np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
6268static void niu_get_tx_stats(struct niu *np) 6288static void niu_get_tx_stats(struct niu *np)
6269{ 6289{
6270 unsigned long pkts, errors, bytes; 6290 unsigned long pkts, errors, bytes;
6291 struct tx_ring_info *tx_rings;
6271 int i; 6292 int i;
6272 6293
6273 pkts = errors = bytes = 0; 6294 pkts = errors = bytes = 0;
6295
6296 tx_rings = ACCESS_ONCE(np->tx_rings);
6297 if (!tx_rings)
6298 goto no_rings;
6299
6274 for (i = 0; i < np->num_tx_rings; i++) { 6300 for (i = 0; i < np->num_tx_rings; i++) {
6275 struct tx_ring_info *rp = &np->tx_rings[i]; 6301 struct tx_ring_info *rp = &tx_rings[i];
6276 6302
6277 pkts += rp->tx_packets; 6303 pkts += rp->tx_packets;
6278 bytes += rp->tx_bytes; 6304 bytes += rp->tx_bytes;
6279 errors += rp->tx_errors; 6305 errors += rp->tx_errors;
6280 } 6306 }
6307
6308no_rings:
6281 np->dev->stats.tx_packets = pkts; 6309 np->dev->stats.tx_packets = pkts;
6282 np->dev->stats.tx_bytes = bytes; 6310 np->dev->stats.tx_bytes = bytes;
6283 np->dev->stats.tx_errors = errors; 6311 np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
6287{ 6315{
6288 struct niu *np = netdev_priv(dev); 6316 struct niu *np = netdev_priv(dev);
6289 6317
6290 niu_get_rx_stats(np); 6318 if (netif_running(dev)) {
6291 niu_get_tx_stats(np); 6319 niu_get_rx_stats(np);
6292 6320 niu_get_tx_stats(np);
6321 }
6293 return &dev->stats; 6322 return &dev->stats;
6294} 6323}
6295 6324
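
The niu changes publish freshly allocated ring arrays safely: the ring count is stored first, smp_wmb() orders it before the pointer assignment, and the stats paths read the pointer once via ACCESS_ONCE() and skip the loop while it is still NULL. A user-space analog using a C11 release store paired with an acquire load (the kernel primitives differ in detail; this is only a sketch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned long packets; };

static int num_rings;
static _Atomic(struct ring *) rings;    /* published pointer */

static int alloc_channels(int n)
{
        struct ring *r = calloc(n, sizeof(*r));

        if (!r)
                return -1;
        num_rings = n;
        /* release store: the count (and array contents) become visible
         * no later than the pointer itself */
        atomic_store_explicit(&rings, r, memory_order_release);
        return 0;
}

static unsigned long total_packets(void)
{
        struct ring *r = atomic_load_explicit(&rings, memory_order_acquire);
        unsigned long sum = 0;
        int i;

        if (!r)                 /* rings not allocated yet: report zero */
                return 0;
        for (i = 0; i < num_rings; i++)
                sum += r[i].packets;
        return sum;
}

int main(void)
{
        printf("before: %lu\n", total_packets());
        if (alloc_channels(4) == 0)
                printf("after: %lu\n", total_packets());
        return 0;
}
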
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
1988 } 1988 }
1989 1989
1990 ndev = alloc_etherdev(sizeof(struct ns83820)); 1990 ndev = alloc_etherdev(sizeof(struct ns83820));
1991 dev = PRIV(ndev);
1992
1993 err = -ENOMEM; 1991 err = -ENOMEM;
1994 if (!dev) 1992 if (!ndev)
1995 goto out; 1993 goto out;
1996 1994
1995 dev = PRIV(ndev);
1997 dev->ndev = ndev; 1996 dev->ndev = ndev;
1998 1997
1999 spin_lock_init(&dev->rx_info.lock); 1998 spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a738..4c9a7d4f3fca 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -519,7 +519,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
519 struct pch_gbe_adapter *adapter; 519 struct pch_gbe_adapter *adapter;
520 adapter = container_of(work, struct pch_gbe_adapter, reset_task); 520 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
521 521
522 rtnl_lock();
522 pch_gbe_reinit_locked(adapter); 523 pch_gbe_reinit_locked(adapter);
524 rtnl_unlock();
523} 525}
524 526
525/** 527/**
@@ -528,14 +530,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
528 */ 530 */
529void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) 531void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
530{ 532{
531 struct net_device *netdev = adapter->netdev; 533 pch_gbe_down(adapter);
532 534 pch_gbe_up(adapter);
533 rtnl_lock();
534 if (netif_running(netdev)) {
535 pch_gbe_down(adapter);
536 pch_gbe_up(adapter);
537 }
538 rtnl_unlock();
539} 535}
540 536
541/** 537/**
@@ -2247,7 +2243,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
2247 struct net_device *netdev = pci_get_drvdata(pdev); 2243 struct net_device *netdev = pci_get_drvdata(pdev);
2248 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2244 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2249 2245
2250 flush_scheduled_work(); 2246 cancel_work_sync(&adapter->reset_task);
2251 unregister_netdev(netdev); 2247 unregister_netdev(netdev);
2252 2248
2253 pch_gbe_hal_phy_hw_reset(&adapter->hw); 2249 pch_gbe_hal_phy_hw_reset(&adapter->hw);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac8551..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
1488 1488
1489 /* 1489 /*
1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
1491 * Early datasheets said to poll the reset bit, but now they say that 1491 * We wait at least 2ms.
1492 * it "is not a reliable indicator and subsequently should be ignored."
1493 * We wait at least 10ms.
1494 */ 1492 */
1495 1493
1496 mdelay(10); 1494 mdelay(2);
1497 1495
1498 /* 1496 /*
1499 * Reset RBCR[01] back to zero as per magic incantation. 1497 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bb8645ab247c..59ccf0c5c610 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -554,6 +554,8 @@ struct rtl8169_private {
554 struct mii_if_info mii; 554 struct mii_if_info mii;
555 struct rtl8169_counters counters; 555 struct rtl8169_counters counters;
556 u32 saved_wolopts; 556 u32 saved_wolopts;
557
558 const struct firmware *fw;
557}; 559};
558 560
559MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 561MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -971,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
971 if (pm) 973 if (pm)
972 pm_request_resume(&tp->pci_dev->dev); 974 pm_request_resume(&tp->pci_dev->dev);
973 netif_carrier_on(dev); 975 netif_carrier_on(dev);
974 netif_info(tp, ifup, dev, "link up\n"); 976 if (net_ratelimit())
977 netif_info(tp, ifup, dev, "link up\n");
975 } else { 978 } else {
976 netif_carrier_off(dev); 979 netif_carrier_off(dev);
977 netif_info(tp, ifdown, dev, "link down\n"); 980 netif_info(tp, ifdown, dev, "link down\n");
@@ -1766,6 +1769,29 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1766 } 1769 }
1767} 1770}
1768 1771
1772static void rtl_release_firmware(struct rtl8169_private *tp)
1773{
1774 release_firmware(tp->fw);
1775 tp->fw = NULL;
1776}
1777
1778static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
1779{
1780 const struct firmware **fw = &tp->fw;
1781 int rc = !*fw;
1782
1783 if (rc) {
1784 rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
1785 if (rc < 0)
1786 goto out;
1787 }
1788
1789 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
1790 rtl_phy_write_fw(tp, *fw);
1791out:
1792 return rc;
1793}
1794
1769static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) 1795static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1770{ 1796{
1771 static const struct phy_reg phy_reg_init[] = { 1797 static const struct phy_reg phy_reg_init[] = {
@@ -2139,7 +2165,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2139 { 0x0d, 0xf880 } 2165 { 0x0d, 0xf880 }
2140 }; 2166 };
2141 void __iomem *ioaddr = tp->mmio_addr; 2167 void __iomem *ioaddr = tp->mmio_addr;
2142 const struct firmware *fw;
2143 2168
2144 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2169 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2145 2170
@@ -2203,11 +2228,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2203 2228
2204 rtl_writephy(tp, 0x1f, 0x0005); 2229 rtl_writephy(tp, 0x1f, 0x0005);
2205 rtl_writephy(tp, 0x05, 0x001b); 2230 rtl_writephy(tp, 0x05, 0x001b);
2206 if (rtl_readphy(tp, 0x06) == 0xbf00 && 2231 if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
2207 request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { 2232 (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
2208 rtl_phy_write_fw(tp, fw);
2209 release_firmware(fw);
2210 } else {
2211 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2233 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2212 } 2234 }
2213 2235
@@ -2257,7 +2279,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2257 { 0x0d, 0xf880 } 2279 { 0x0d, 0xf880 }
2258 }; 2280 };
2259 void __iomem *ioaddr = tp->mmio_addr; 2281 void __iomem *ioaddr = tp->mmio_addr;
2260 const struct firmware *fw;
2261 2282
2262 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2283 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2263 2284
@@ -2312,11 +2333,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2312 2333
2313 rtl_writephy(tp, 0x1f, 0x0005); 2334 rtl_writephy(tp, 0x1f, 0x0005);
2314 rtl_writephy(tp, 0x05, 0x001b); 2335 rtl_writephy(tp, 0x05, 0x001b);
2315 if (rtl_readphy(tp, 0x06) == 0xb300 && 2336 if ((rtl_readphy(tp, 0x06) != 0xb300) ||
2316 request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { 2337 (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
2317 rtl_phy_write_fw(tp, fw);
2318 release_firmware(fw);
2319 } else {
2320 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2338 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2321 } 2339 }
2322 2340
@@ -3200,6 +3218,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3200 3218
3201 cancel_delayed_work_sync(&tp->task); 3219 cancel_delayed_work_sync(&tp->task);
3202 3220
3221 rtl_release_firmware(tp);
3222
3203 unregister_netdev(dev); 3223 unregister_netdev(dev);
3204 3224
3205 if (pci_dev_run_wake(pdev)) 3225 if (pci_dev_run_wake(pdev))
@@ -3738,7 +3758,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
3738 RTL_W16(IntrMitigate, 0x5151); 3758 RTL_W16(IntrMitigate, 0x5151);
3739 3759
3740 /* Work around for RxFIFO overflow. */ 3760 /* Work around for RxFIFO overflow. */
3741 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 3761 if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
3762 tp->mac_version == RTL_GIGA_MAC_VER_22) {
3742 tp->intr_event |= RxFIFOOver | PCSTimeout; 3763 tp->intr_event |= RxFIFOOver | PCSTimeout;
3743 tp->intr_event &= ~RxOverflow; 3764 tp->intr_event &= ~RxOverflow;
3744 } 3765 }
@@ -4620,12 +4641,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4620 break; 4641 break;
4621 } 4642 }
4622 4643
4623 /* Work around for rx fifo overflow */ 4644 if (unlikely(status & RxFIFOOver)) {
4624 if (unlikely(status & RxFIFOOver) && 4645 switch (tp->mac_version) {
4625 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4646 /* Work around for rx fifo overflow */
4626 netif_stop_queue(dev); 4647 case RTL_GIGA_MAC_VER_11:
4627 rtl8169_tx_timeout(dev); 4648 case RTL_GIGA_MAC_VER_22:
4628 break; 4649 case RTL_GIGA_MAC_VER_26:
4650 netif_stop_queue(dev);
4651 rtl8169_tx_timeout(dev);
4652 goto done;
4653 /* Testers needed. */
4654 case RTL_GIGA_MAC_VER_17:
4655 case RTL_GIGA_MAC_VER_19:
4656 case RTL_GIGA_MAC_VER_20:
4657 case RTL_GIGA_MAC_VER_21:
4658 case RTL_GIGA_MAC_VER_23:
4659 case RTL_GIGA_MAC_VER_24:
4660 case RTL_GIGA_MAC_VER_27:
4661 case RTL_GIGA_MAC_VER_28:
4662 /* Experimental science. Pktgen proof. */
4663 case RTL_GIGA_MAC_VER_12:
4664 case RTL_GIGA_MAC_VER_25:
4665 if (status == RxFIFOOver)
4666 goto done;
4667 break;
4668 default:
4669 break;
4670 }
4629 } 4671 }
4630 4672
4631 if (unlikely(status & SYSErr)) { 4673 if (unlikely(status & SYSErr)) {
@@ -4661,7 +4703,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4661 (status & RxFIFOOver) ? (status | RxOverflow) : status); 4703 (status & RxFIFOOver) ? (status | RxOverflow) : status);
4662 status = RTL_R16(IntrStatus); 4704 status = RTL_R16(IntrStatus);
4663 } 4705 }
4664 4706done:
4665 return IRQ_RETVAL(handled); 4707 return IRQ_RETVAL(handled);
4666} 4708}
4667 4709
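
The r8169 hunks stop requesting and releasing the PHY firmware on every reconfiguration: rtl_apply_firmware() loads the blob on first use and caches it in tp->fw, and rtl_release_firmware() frees it once at remove time. A stripped-down cache-on-first-use sketch in plain C (load_blob() and the context struct are stand-ins; the driver uses request_firmware()/release_firmware()):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        char *fw;       /* cached blob, NULL until first use */
};

static char *load_blob(const char *name)
{
        /* stand-in for request_firmware(): just record the name */
        char *p = malloc(64);

        if (p)
                snprintf(p, 64, "firmware:%s", name);
        return p;
}

static int apply_firmware(struct ctx *c, const char *name)
{
        if (!c->fw) {                           /* load only once */
                c->fw = load_blob(name);
                if (!c->fw)
                        return -1;
        }
        printf("applying %s\n", c->fw);         /* reuse the cached copy */
        return 0;
}

static void release_fw(struct ctx *c)
{
        free(c->fw);
        c->fw = NULL;
}

int main(void)
{
        struct ctx c = { NULL };

        apply_firmware(&c, "rtl8168d-1.fw");
        apply_firmware(&c, "rtl8168d-1.fw");    /* second call hits the cache */
        release_fw(&c);
        return 0;
}
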
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c6e675..002bac743843 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void)
1153 int count; 1153 int count;
1154 int cpu; 1154 int cpu;
1155 1155
1156 if (rss_cpus)
1157 return rss_cpus;
1158
1156 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 1159 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
1157 printk(KERN_WARNING 1160 printk(KERN_WARNING
1158 "sfc: RSS disabled due to allocation failure\n"); 1161 "sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1266 efx->legacy_irq = 0; 1269 efx->legacy_irq = 0;
1267} 1270}
1268 1271
1269struct efx_tx_queue *
1270efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
1271{
1272 unsigned tx_channel_offset =
1273 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1274 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
1275 type >= EFX_TXQ_TYPES);
1276 return &efx->channel[tx_channel_offset + index]->tx_queue[type];
1277}
1278
1279static void efx_set_channels(struct efx_nic *efx) 1272static void efx_set_channels(struct efx_nic *efx)
1280{ 1273{
1281 struct efx_channel *channel; 1274 struct efx_channel *channel;
1282 struct efx_tx_queue *tx_queue; 1275 struct efx_tx_queue *tx_queue;
1283 unsigned tx_channel_offset = 1276
1277 efx->tx_channel_offset =
1284 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1285 1279
1286 /* Channel pointers were set in efx_init_struct() but we now 1280 /* Channel pointers were set in efx_init_struct() but we now
1287 * need to clear them for TX queues in any RX-only channels. */ 1281 * need to clear them for TX queues in any RX-only channels. */
1288 efx_for_each_channel(channel, efx) { 1282 efx_for_each_channel(channel, efx) {
1289 if (channel->channel - tx_channel_offset >= 1283 if (channel->channel - efx->tx_channel_offset >=
1290 efx->n_tx_channels) { 1284 efx->n_tx_channels) {
1291 efx_for_each_channel_tx_queue(tx_queue, channel) 1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1292 tx_queue->channel = NULL; 1286 tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 70e4f7dcce81..61ddd2c6e750 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1107,22 +1107,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1107 1107
1108 /* Restore PCI configuration if needed */ 1108 /* Restore PCI configuration if needed */
1109 if (method == RESET_TYPE_WORLD) { 1109 if (method == RESET_TYPE_WORLD) {
1110 if (efx_nic_is_dual_func(efx)) { 1110 if (efx_nic_is_dual_func(efx))
1111 rc = pci_restore_state(nic_data->pci_dev2); 1111 pci_restore_state(nic_data->pci_dev2);
1112 if (rc) { 1112 pci_restore_state(efx->pci_dev);
1113 netif_err(efx, drv, efx->net_dev,
1114 "failed to restore PCI config for "
1115 "the secondary function\n");
1116 goto fail3;
1117 }
1118 }
1119 rc = pci_restore_state(efx->pci_dev);
1120 if (rc) {
1121 netif_err(efx, drv, efx->net_dev,
1122 "failed to restore PCI config for the "
1123 "primary function\n");
1124 goto fail4;
1125 }
1126 netif_dbg(efx, drv, efx->net_dev, 1113 netif_dbg(efx, drv, efx->net_dev,
1127 "successfully restored PCI config\n"); 1114 "successfully restored PCI config\n");
1128 } 1115 }
@@ -1133,7 +1120,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1133 rc = -ETIMEDOUT; 1120 rc = -ETIMEDOUT;
1134 netif_err(efx, hw, efx->net_dev, 1121 netif_err(efx, hw, efx->net_dev,
1135 "timed out waiting for hardware reset\n"); 1122 "timed out waiting for hardware reset\n");
1136 goto fail5; 1123 goto fail3;
1137 } 1124 }
1138 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); 1125 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1139 1126
@@ -1141,11 +1128,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1141 1128
1142 /* pci_save_state() and pci_restore_state() MUST be called in pairs */ 1129 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1143fail2: 1130fail2:
1144fail3:
1145 pci_restore_state(efx->pci_dev); 1131 pci_restore_state(efx->pci_dev);
1146fail1: 1132fail1:
1147fail4: 1133fail3:
1148fail5:
1149 return rc; 1134 return rc;
1150} 1135}
1151 1136
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66ddf93a..28df8665256a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@ struct efx_nic {
735 unsigned next_buffer_table; 735 unsigned next_buffer_table;
736 unsigned n_channels; 736 unsigned n_channels;
737 unsigned n_rx_channels; 737 unsigned n_rx_channels;
738 unsigned tx_channel_offset;
738 unsigned n_tx_channels; 739 unsigned n_tx_channels;
739 unsigned int rx_buffer_len; 740 unsigned int rx_buffer_len;
740 unsigned int rx_buffer_order; 741 unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
929 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 930 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
930 (_efx)->channel[_channel->channel + 1] : NULL) 931 (_efx)->channel[_channel->channel + 1] : NULL)
931 932
932extern struct efx_tx_queue * 933static inline struct efx_tx_queue *
933efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); 934efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
935{
936 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
937 type >= EFX_TXQ_TYPES);
938 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
939}
934 940
935static inline struct efx_tx_queue * 941static inline struct efx_tx_queue *
936efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 942efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df1..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n", 1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1778 net_dev->name, sis_priv->cur_rx, 1778 net_dev->name, sis_priv->cur_rx,
1779 sis_priv->dirty_rx); 1779 sis_priv->dirty_rx);
1780 dev_kfree_skb(skb);
1780 break; 1781 break;
1781 } 1782 }
1782 1783
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f69998..93b32d366611 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -60,12 +60,6 @@
60#define BAR_0 0 60#define BAR_0 0
61#define BAR_2 2 61#define BAR_2 2
62 62
63#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
64#define TG3_VLAN_TAG_USED 1
65#else
66#define TG3_VLAN_TAG_USED 0
67#endif
68
69#include "tg3.h" 63#include "tg3.h"
70 64
71#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
@@ -134,9 +128,6 @@
134 TG3_TX_RING_SIZE) 128 TG3_TX_RING_SIZE)
135#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
136 130
137#define TG3_RX_DMA_ALIGN 16
138#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
139
140#define TG3_DMA_BYTE_ENAB 64 131#define TG3_DMA_BYTE_ENAB 64
141 132
142#define TG3_RX_STD_DMA_SZ 1536 133#define TG3_RX_STD_DMA_SZ 1536
@@ -4722,8 +4713,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4722 struct sk_buff *skb; 4713 struct sk_buff *skb;
4723 dma_addr_t dma_addr; 4714 dma_addr_t dma_addr;
4724 u32 opaque_key, desc_idx, *post_ptr; 4715 u32 opaque_key, desc_idx, *post_ptr;
4725 bool hw_vlan __maybe_unused = false;
4726 u16 vtag __maybe_unused = 0;
4727 4716
4728 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4717 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4729 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4718 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4771,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4782 tg3_recycle_rx(tnapi, tpr, opaque_key, 4771 tg3_recycle_rx(tnapi, tpr, opaque_key,
4783 desc_idx, *post_ptr); 4772 desc_idx, *post_ptr);
4784 4773
4785 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4774 copy_skb = netdev_alloc_skb(tp->dev, len +
4786 TG3_RAW_IP_ALIGN); 4775 TG3_RAW_IP_ALIGN);
4787 if (copy_skb == NULL) 4776 if (copy_skb == NULL)
4788 goto drop_it_no_recycle; 4777 goto drop_it_no_recycle;
4789 4778
4790 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4779 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4791 skb_put(copy_skb, len); 4780 skb_put(copy_skb, len);
4792 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4781 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4793 skb_copy_from_linear_data(skb, copy_skb->data, len); 4782 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4803,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4814 } 4803 }
4815 4804
4816 if (desc->type_flags & RXD_FLAG_VLAN && 4805 if (desc->type_flags & RXD_FLAG_VLAN &&
4817 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4806 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4818 vtag = desc->err_vlan & RXD_VLAN_MASK; 4807 __vlan_hwaccel_put_tag(skb,
4819#if TG3_VLAN_TAG_USED 4808 desc->err_vlan & RXD_VLAN_MASK);
4820 if (tp->vlgrp)
4821 hw_vlan = true;
4822 else
4823#endif
4824 {
4825 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4826 __skb_push(skb, VLAN_HLEN);
4827
4828 memmove(ve, skb->data + VLAN_HLEN,
4829 ETH_ALEN * 2);
4830 ve->h_vlan_proto = htons(ETH_P_8021Q);
4831 ve->h_vlan_TCI = htons(vtag);
4832 }
4833 }
4834 4809
4835#if TG3_VLAN_TAG_USED 4810 napi_gro_receive(&tnapi->napi, skb);
4836 if (hw_vlan)
4837 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4838 else
4839#endif
4840 napi_gro_receive(&tnapi->napi, skb);
4841 4811
4842 received++; 4812 received++;
4843 budget--; 4813 budget--;
@@ -5740,11 +5710,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5740 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5710 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5741 } 5711 }
5742 5712
5743#if TG3_VLAN_TAG_USED
5744 if (vlan_tx_tag_present(skb)) 5713 if (vlan_tx_tag_present(skb))
5745 base_flags |= (TXD_FLAG_VLAN | 5714 base_flags |= (TXD_FLAG_VLAN |
5746 (vlan_tx_tag_get(skb) << 16)); 5715 (vlan_tx_tag_get(skb) << 16));
5747#endif
5748 5716
5749 len = skb_headlen(skb); 5717 len = skb_headlen(skb);
5750 5718
@@ -5986,11 +5954,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5986 } 5954 }
5987 } 5955 }
5988 } 5956 }
5989#if TG3_VLAN_TAG_USED 5957
5990 if (vlan_tx_tag_present(skb)) 5958 if (vlan_tx_tag_present(skb))
5991 base_flags |= (TXD_FLAG_VLAN | 5959 base_flags |= (TXD_FLAG_VLAN |
5992 (vlan_tx_tag_get(skb) << 16)); 5960 (vlan_tx_tag_get(skb) << 16));
5993#endif
5994 5961
5995 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5962 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5996 !mss && skb->len > VLAN_ETH_FRAME_LEN) 5963 !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -9532,17 +9499,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9532 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9499 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9533 RX_MODE_KEEP_VLAN_TAG); 9500 RX_MODE_KEEP_VLAN_TAG);
9534 9501
9502#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9535 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9503 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9536 * flag clear. 9504 * flag clear.
9537 */ 9505 */
9538#if TG3_VLAN_TAG_USED
9539 if (!tp->vlgrp &&
9540 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9541 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9542#else
9543 /* By definition, VLAN is disabled always in this
9544 * case.
9545 */
9546 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9506 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9547 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9507 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9548#endif 9508#endif
@@ -11230,31 +11190,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11230 return -EOPNOTSUPP; 11190 return -EOPNOTSUPP;
11231} 11191}
11232 11192
11233#if TG3_VLAN_TAG_USED
11234static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11235{
11236 struct tg3 *tp = netdev_priv(dev);
11237
11238 if (!netif_running(dev)) {
11239 tp->vlgrp = grp;
11240 return;
11241 }
11242
11243 tg3_netif_stop(tp);
11244
11245 tg3_full_lock(tp, 0);
11246
11247 tp->vlgrp = grp;
11248
11249 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11250 __tg3_set_rx_mode(dev);
11251
11252 tg3_netif_start(tp);
11253
11254 tg3_full_unlock(tp);
11255}
11256#endif
11257
11258static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11193static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11259{ 11194{
11260 struct tg3 *tp = netdev_priv(dev); 11195 struct tg3 *tp = netdev_priv(dev);
@@ -13066,9 +13001,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13066 13001
13067static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13002static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
13068{ 13003{
13069#if TG3_VLAN_TAG_USED
13070 dev->vlan_features |= flags; 13004 dev->vlan_features |= flags;
13071#endif
13072} 13005}
13073 13006
13074static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13007static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13861,11 +13794,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13861 else 13794 else
13862 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13795 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13863 13796
13864 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13797 tp->rx_offset = NET_IP_ALIGN;
13865 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13798 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13867 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13800 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13868 tp->rx_offset -= NET_IP_ALIGN; 13801 tp->rx_offset = 0;
13869#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13802#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13870 tp->rx_copy_thresh = ~(u16)0; 13803 tp->rx_copy_thresh = ~(u16)0;
13871#endif 13804#endif
@@ -14629,9 +14562,6 @@ static const struct net_device_ops tg3_netdev_ops = {
14629 .ndo_do_ioctl = tg3_ioctl, 14562 .ndo_do_ioctl = tg3_ioctl,
14630 .ndo_tx_timeout = tg3_tx_timeout, 14563 .ndo_tx_timeout = tg3_tx_timeout,
14631 .ndo_change_mtu = tg3_change_mtu, 14564 .ndo_change_mtu = tg3_change_mtu,
14632#if TG3_VLAN_TAG_USED
14633 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14634#endif
14635#ifdef CONFIG_NET_POLL_CONTROLLER 14565#ifdef CONFIG_NET_POLL_CONTROLLER
14636 .ndo_poll_controller = tg3_poll_controller, 14566 .ndo_poll_controller = tg3_poll_controller,
14637#endif 14567#endif
@@ -14648,9 +14578,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14648 .ndo_do_ioctl = tg3_ioctl, 14578 .ndo_do_ioctl = tg3_ioctl,
14649 .ndo_tx_timeout = tg3_tx_timeout, 14579 .ndo_tx_timeout = tg3_tx_timeout,
14650 .ndo_change_mtu = tg3_change_mtu, 14580 .ndo_change_mtu = tg3_change_mtu,
14651#if TG3_VLAN_TAG_USED
14652 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14653#endif
14654#ifdef CONFIG_NET_POLL_CONTROLLER 14581#ifdef CONFIG_NET_POLL_CONTROLLER
14655 .ndo_poll_controller = tg3_poll_controller, 14582 .ndo_poll_controller = tg3_poll_controller,
14656#endif 14583#endif
@@ -14700,9 +14627,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14700 14627
14701 SET_NETDEV_DEV(dev, &pdev->dev); 14628 SET_NETDEV_DEV(dev, &pdev->dev);
14702 14629
14703#if TG3_VLAN_TAG_USED
14704 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14630 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14705#endif
14706 14631
14707 tp = netdev_priv(dev); 14632 tp = netdev_priv(dev);
14708 tp->pdev = pdev; 14633 tp->pdev = pdev;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c82..f528243e1a4f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2808,9 +2808,6 @@ struct tg3 {
2808 u32 rx_std_max_post; 2808 u32 rx_std_max_post;
2809 u32 rx_offset; 2809 u32 rx_offset;
2810 u32 rx_pkt_map_sz; 2810 u32 rx_pkt_map_sz;
2811#if TG3_VLAN_TAG_USED
2812 struct vlan_group *vlgrp;
2813#endif
2814 2811
2815 2812
2816 /* begin "everything else" cacheline(s) section */ 2813 /* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5ec65b..7cb301da7474 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
142MODULE_AUTHOR("Tilera"); 142MODULE_AUTHOR("Tilera");
143MODULE_LICENSE("GPL"); 143MODULE_LICENSE("GPL");
144 144
145
146#define IS_MULTICAST(mac_addr) \
147 (((u8 *)(mac_addr))[0] & 0x01)
148
149#define IS_BROADCAST(mac_addr) \
150 (((u16 *)(mac_addr))[0] == 0xffff)
151
152
153/* 145/*
154 * Queue of incoming packets for a specific cpu and device. 146 * Queue of incoming packets for a specific cpu and device.
155 * 147 *
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
795 /* 787 /*
796 * FIXME: Implement HW multicast filter. 788 * FIXME: Implement HW multicast filter.
797 */ 789 */
798 if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { 790 if (is_unicast_ether_addr(buf)) {
799 /* Filter packets not for our address. */ 791 /* Filter packets not for our address. */
800 const u8 *mine = dev->dev_addr; 792 const u8 *mine = dev->dev_addr;
801 filter = compare_ether_addr(mine, buf); 793 filter = compare_ether_addr(mine, buf);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 73a3e0d93237..715e7b47e7e9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2032,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
2032 netdev_for_each_mc_addr(ha, dev) { 2032 netdev_for_each_mc_addr(ha, dev) {
2033 /* Only support group multicast for now. 2033 /* Only support group multicast for now.
2034 */ 2034 */
2035 if (!(ha->addr[0] & 1)) 2035 if (!is_multicast_ether_addr(ha->addr))
2036 continue; 2036 continue;
2037 2037
2038 /* Ask CPM to run CRC and set bit in 2038 /* Ask CPM to run CRC and set bit in
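
The tilepro and ucc_geth hunks replace open-coded address checks with the generic etherdevice helpers: an Ethernet address is multicast when bit 0 of its first octet is set, broadcast when all six octets are 0xff (which is itself a multicast address), and unicast otherwise. Standalone versions of those predicates:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool is_multicast(const uint8_t *addr)
{
        return addr[0] & 0x01;          /* group bit of the first octet */
}

static bool is_broadcast(const uint8_t *addr)
{
        static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        return memcmp(addr, bcast, 6) == 0;
}

static bool is_unicast(const uint8_t *addr)
{
        return !is_multicast(addr);     /* broadcast counts as multicast */
}

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("multicast=%d broadcast=%d unicast=%d\n",
               is_multicast(mac), is_broadcast(mac), is_unicast(mac));
        return 0;
}
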
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104ab199..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cdc_ncm.c 2 * cdc_ncm.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2010 4 * Copyright (C) ST-Ericsson 2010-2011
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> 5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "30-Nov-2010" 57#define DRIVER_VERSION "7-Feb-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
77 */ 77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32 78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79 79
80/* Maximum amount of IN datagrams in NTB */
81#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
82
80/* Restart the timer, if amount of datagrams is less than given value */ 83/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 84#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82 85
@@ -85,11 +88,6 @@
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ 88 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) 89 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87 90
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data { 91struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16; 92 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16; 93 struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{ 196{
199 struct usb_cdc_notification req; 197 struct usb_cdc_notification req;
200 u32 val; 198 u32 val;
201 __le16 max_datagram_size;
202 u8 flags; 199 u8 flags;
203 u8 iface_no; 200 u8 iface_no;
204 int err; 201 int err;
202 u16 ntb_fmt_supported;
205 203
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 204 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207 205
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 221 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); 222 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 223 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
224 /* devices prior to NCM Errata shall set this field to zero */
225 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
226 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
226 227
227 if (ctx->func_desc != NULL) 228 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities; 229 flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
231 232
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 233 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 234 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n", 235 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 236 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags); 237 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
237 238
238 /* max count of tx datagrams without terminating NULL entry */ 239 /* max count of tx datagrams */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 240 if ((ctx->tx_max_datagrams == 0) ||
241 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
242 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240 243
241 /* verify maximum size of received NTB in bytes */ 244 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max < 245 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 246 pr_debug("Using min receive length=%d\n",
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { 247 USB_CDC_NCM_NTB_MIN_IN_SIZE);
248 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
249 }
250
251 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
245 pr_debug("Using default maximum receive length=%d\n", 252 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX); 253 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; 254 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 } 255 }
249 256
257 /* inform device about NTB input size changes */
258 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
259 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
260 USB_RECIP_INTERFACE;
261 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
262 req.wValue = 0;
263 req.wIndex = cpu_to_le16(iface_no);
264
265 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
266 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
267
268 req.wLength = 8;
269 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
270 ndp_in_sz.wNtbInMaxDatagrams =
271 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
272 ndp_in_sz.wReserved = 0;
273 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
274 1000);
275 } else {
276 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
277
278 req.wLength = 4;
279 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
280 NULL, 1000);
281 }
282
283 if (err)
284 pr_debug("Setting NTB Input Size failed\n");
285 }
286
250 /* verify maximum size of transmitted NTB in bytes */ 287 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max < 288 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 289 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
297 /* additional configuration */ 334 /* additional configuration */
298 335
299 /* set CRC Mode */ 336 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 337 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
301 req.bNotificationType = USB_CDC_SET_CRC_MODE; 338 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 339 USB_RECIP_INTERFACE;
303 req.wIndex = cpu_to_le16(iface_no); 340 req.bNotificationType = USB_CDC_SET_CRC_MODE;
304 req.wLength = 0; 341 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
305 342 req.wIndex = cpu_to_le16(iface_no);
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 343 req.wLength = 0;
307 if (err) 344
308 pr_debug("Setting CRC mode off failed\n"); 345 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
346 if (err)
347 pr_debug("Setting CRC mode off failed\n");
348 }
309 349
310 /* set NTB format */ 350 /* set NTB format, if both formats are supported */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 351 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 352 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 353 USB_RECIP_INTERFACE;
314 req.wIndex = cpu_to_le16(iface_no); 354 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
315 req.wLength = 0; 355 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
356 req.wIndex = cpu_to_le16(iface_no);
357 req.wLength = 0;
358
359 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
360 if (err)
361 pr_debug("Setting NTB format to 16-bit failed\n");
362 }
316 363
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 364 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320 365
321 /* set Max Datagram Size (MTU) */ 366 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 367 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 368 __le16 max_datagram_size;
324 req.wValue = 0; 369 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
325 req.wIndex = cpu_to_le16(iface_no); 370
326 req.wLength = cpu_to_le16(2); 371 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
372 USB_RECIP_INTERFACE;
373 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
374 req.wValue = 0;
375 req.wIndex = cpu_to_le16(iface_no);
376 req.wLength = cpu_to_le16(2);
377
378 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
379 1000);
380 if (err) {
381 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
382 CDC_NCM_MIN_DATAGRAM_SIZE);
383 } else {
384 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
385 /* Check Eth descriptor value */
386 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
387 if (ctx->max_datagram_size > eth_max_sz)
388 ctx->max_datagram_size = eth_max_sz;
389 } else {
390 if (ctx->max_datagram_size >
391 CDC_NCM_MAX_DATAGRAM_SIZE)
392 ctx->max_datagram_size =
393 CDC_NCM_MAX_DATAGRAM_SIZE;
394 }
327 395
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); 396 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
329 if (err) { 397 ctx->max_datagram_size =
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", 398 CDC_NCM_MIN_DATAGRAM_SIZE;
331 CDC_NCM_MIN_DATAGRAM_SIZE); 399
332 /* use default */ 400 /* if value changed, update device */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 401 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
334 } else { 402 USB_RECIP_INTERFACE;
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 403 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
404 req.wValue = 0;
405 req.wIndex = cpu_to_le16(iface_no);
406 req.wLength = 2;
407 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
408
409 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
410 0, NULL, 1000);
411 if (err)
412 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
413 }
336 414
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 } 415 }
342 416
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 417 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
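
In the reworked cdc_ncm_setup() above, the max datagram size is only queried when the device advertises USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE, then clamped against the Ethernet descriptor's wMaxSegmentSize and the driver's CDC_NCM_MIN/MAX_DATAGRAM_SIZE bounds before being pushed back with SET_MAX_DATAGRAM_SIZE. The clamping reduces to roughly this (the numeric limits below are placeholders; the real ones come from the driver and the descriptors):

#include <stdio.h>

#define NCM_MIN_DATAGRAM_SIZE   1514u   /* driver floor, placeholder value */
#define NCM_MAX_DATAGRAM_SIZE   2048u   /* driver ceiling, placeholder value */

static unsigned int clamp_datagram_size(unsigned int requested,
                                        unsigned int eth_max)
{
        unsigned int size = requested;

        /* the Ethernet descriptor limit applies only when it is itself
         * below the driver ceiling */
        if (eth_max < NCM_MAX_DATAGRAM_SIZE) {
                if (size > eth_max)
                        size = eth_max;
        } else if (size > NCM_MAX_DATAGRAM_SIZE) {
                size = NCM_MAX_DATAGRAM_SIZE;
        }

        if (size < NCM_MIN_DATAGRAM_SIZE)
                size = NCM_MIN_DATAGRAM_SIZE;

        return size;
}

int main(void)
{
        printf("%u\n", clamp_datagram_size(9000, 1518));       /* -> 1518 */
        printf("%u\n", clamp_datagram_size(512, 4096));        /* -> 1514 */
        return 0;
}
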
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
466 540
467 ctx->ether_desc = 541 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf; 542 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu = 543 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 544 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472 545
473 if (dev->hard_mtu < 546 if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) 547 dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
475 dev->hard_mtu = 548 else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; 549 dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break; 550 break;
483 551
484 case USB_CDC_NCM_TYPE: 552 case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
628 u32 offset; 696 u32 offset;
629 u32 last_offset; 697 u32 last_offset;
630 u16 n = 0; 698 u16 n = 0;
631 u8 timeout = 0; 699 u8 ready2send = 0;
632 700
633 /* if there is a remaining skb, it gets priority */ 701 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL) 702 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb); 703 swap(skb, ctx->tx_rem_skb);
636 else 704 else
637 timeout = 1; 705 ready2send = 1;
638 706
639 /* 707 /*
640 * +----------------+ 708 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
682 750
683 for (; n < ctx->tx_max_datagrams; n++) { 751 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */ 752 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max) 753 if (offset >= ctx->tx_max) {
754 ready2send = 1;
686 break; 755 break;
687 756 }
688 /* compute maximum buffer size */ 757 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset; 758 rem = ctx->tx_max - offset;
690 759
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
711 } 780 }
712 ctx->tx_rem_skb = skb; 781 ctx->tx_rem_skb = skb;
713 skb = NULL; 782 skb = NULL;
714 783 ready2send = 1;
715 /* loop one more time */
716 timeout = 1;
717 } 784 }
718 break; 785 break;
719 } 786 }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
756 ctx->tx_curr_last_offset = last_offset; 823 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb; 824 goto exit_no_skb;
758 825
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { 826 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
760 /* wait for more frames */ 827 /* wait for more frames */
761 /* push variables */ 828 /* push variables */
762 ctx->tx_curr_skb = skb_out; 829 ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 880 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 881 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 882 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 883 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus); 884 ctx->tx_ndp_modulus);
818 885
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 886 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * 892 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16)); 893 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 894 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ 895 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
829 896
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, 897 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
831 &(ctx->tx_ncm.ndp16), 898 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16)); 899 sizeof(ctx->tx_ncm.ndp16));
833 900
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + 901 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
835 sizeof(ctx->tx_ncm.ndp16), 902 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16), 903 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) * 904 (ctx->tx_curr_frame_num + 1) *
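
The wFpIndex to wNdpIndex renames above follow the field names of the NCM header structures; the value itself is still the offset of the NDP16 block, which is the size of the NTH16 header rounded up to the negotiated alignment (ctx->tx_ndp_modulus). A small standalone illustration of that rounding, with an assumed 12-byte NTH16 and an assumed modulus of 4:

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of a, where a is a power of two;
 * same shape as the kernel's ALIGN() macro */
static uint32_t align_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint32_t nth16_size = 12;   /* assumed sizeof(struct usb_cdc_ncm_nth16) */
	uint32_t ndp_modulus = 4;   /* assumed alignment from the device */

	/* the NDP16 block is placed right after the NTH16 header, aligned up */
	printf("wNdpIndex = %u\n", align_up(nth16_size, ndp_modulus));
	return 0;
}
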
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
868 if (ctx->tx_timer_pending != 0) { 935 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--; 936 ctx->tx_timer_pending--;
870 restart = 1; 937 restart = 1;
871 } else 938 } else {
872 restart = 0; 939 restart = 0;
940 }
873 941
874 spin_unlock(&ctx->mtx); 942 spin_unlock(&ctx->mtx);
875 943
876 if (restart) 944 if (restart) {
945 spin_lock(&ctx->mtx);
877 cdc_ncm_tx_timeout_start(ctx); 946 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL) 947 spin_unlock(&ctx->mtx);
948 } else if (ctx->netdev != NULL) {
879 usbnet_start_xmit(NULL, ctx->netdev); 949 usbnet_start_xmit(NULL, ctx->netdev);
950 }
880} 951}
881 952
882static struct sk_buff * 953static struct sk_buff *
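
In the timeout handler above, the restart path now re-takes ctx->mtx before calling cdc_ncm_tx_timeout_start(), presumably because that helper touches state protected by the same lock. A userspace model of the pattern, using a pthread mutex in place of the spinlock (the struct and field names here are invented for illustration):

#include <pthread.h>
#include <stdbool.h>

struct tx_ctx {
	pthread_mutex_t mtx;
	unsigned int tx_timer_pending;
	bool timer_armed;              /* stands in for the kernel timer */
};

/* expects tx_ctx.mtx to be held: it touches shared timer state */
static void tx_timeout_start_locked(struct tx_ctx *ctx)
{
	ctx->timer_armed = true;
}

static void tx_timeout_handler(struct tx_ctx *ctx)
{
	bool restart;

	pthread_mutex_lock(&ctx->mtx);
	if (ctx->tx_timer_pending) {
		ctx->tx_timer_pending--;
		restart = true;
	} else {
		restart = false;
	}
	pthread_mutex_unlock(&ctx->mtx);

	if (restart) {
		/* re-acquire the lock before rearming, as the hunk does */
		pthread_mutex_lock(&ctx->mtx);
		tx_timeout_start_locked(ctx);
		pthread_mutex_unlock(&ctx->mtx);
	}
}

int main(void)
{
	struct tx_ctx ctx = {
		.mtx = PTHREAD_MUTEX_INITIALIZER,
		.tx_timer_pending = 1,
	};

	tx_timeout_handler(&ctx);
	return ctx.timer_armed ? 0 : 1;
}
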
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb); 971 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL) 972 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1; 973 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904 974
905 /* Start timer, if there is a remaining skb */ 975 /* Start timer, if there is a remaining skb */
906 if (need_timer) 976 if (need_timer)
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
908 978
909 if (skb_out) 979 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 980 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
981
982 spin_unlock(&ctx->mtx);
911 return skb_out; 983 return skb_out;
912 984
913error: 985error:
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
956 goto error; 1028 goto error;
957 } 1029 }
958 1030
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); 1031 temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { 1032 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n"); 1033 pr_debug("invalid DPT16 index\n");
962 goto error; 1034 goto error;
@@ -1020,14 +1092,16 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1020 if (((offset + temp) > actlen) || 1092 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { 1093 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
1022 pr_debug("invalid frame detected (ignored)" 1094 pr_debug("invalid frame detected (ignored)"
1023 "offset[%u]=%u, length=%u, skb=%p\n", 1095 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb); 1096 x, offset, temp, skb_in);
1025 if (!x) 1097 if (!x)
1026 goto error; 1098 goto error;
1027 break; 1099 break;
1028 1100
1029 } else { 1101 } else {
1030 skb = skb_clone(skb_in, GFP_ATOMIC); 1102 skb = skb_clone(skb_in, GFP_ATOMIC);
1103 if (!skb)
1104 goto error;
1031 skb->len = temp; 1105 skb->len = temp;
1032 skb->data = ((u8 *)skb_in->data) + offset; 1106 skb->data = ((u8 *)skb_in->data) + offset;
1033 skb_set_tail_pointer(skb, temp); 1107 skb_set_tail_pointer(skb, temp);
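
Two fixes above are worth noting: the debug message now prints skb_in, the buffer that actually exists at that point, and the result of skb_clone() is checked before use, since the clone can fail under memory pressure. The same defensive shape in plain userspace C (all names here are invented for illustration):

#include <stdlib.h>
#include <string.h>

/* userspace stand-in for cloning a received buffer: the clone can fail,
 * and the caller must bail out instead of dereferencing NULL */
static unsigned char *clone_frame(const unsigned char *src, size_t len)
{
	unsigned char *copy = malloc(len);

	if (!copy)
		return NULL;
	memcpy(copy, src, len);
	return copy;
}

static int deliver_frame(const unsigned char *data, size_t len)
{
	unsigned char *frame = clone_frame(data, len);

	if (!frame)
		return -1;      /* same idea as the added "if (!skb) goto error" */
	/* ... hand the frame up the stack ... */
	free(frame);
	return 0;
}

int main(void)
{
	unsigned char pkt[64] = { 0 };

	return deliver_frame(pkt, sizeof(pkt)) ? 1 : 0;
}
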
@@ -1041,10 +1115,10 @@ error:
1041 1115
1042static void 1116static void
1043cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, 1117cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1044 struct connection_speed_change *data) 1118 struct usb_cdc_speed_change *data)
1045{ 1119{
1046 uint32_t rx_speed = le32_to_cpu(data->USBitRate); 1120 uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1047 uint32_t tx_speed = le32_to_cpu(data->DSBitRate); 1121 uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1048 1122
1049 /* 1123 /*
1050 * Currently the USB-NET API does not support reporting the actual 1124 * Currently the USB-NET API does not support reporting the actual
@@ -1085,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1085 /* test for split data in 8-byte chunks */ 1159 /* test for split data in 8-byte chunks */
1086 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { 1160 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1087 cdc_ncm_speed_change(ctx, 1161 cdc_ncm_speed_change(ctx,
1088 (struct connection_speed_change *)urb->transfer_buffer); 1162 (struct usb_cdc_speed_change *)urb->transfer_buffer);
1089 return; 1163 return;
1090 } 1164 }
1091 1165
@@ -1113,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1113 break; 1187 break;
1114 1188
1115 case USB_CDC_NOTIFY_SPEED_CHANGE: 1189 case USB_CDC_NOTIFY_SPEED_CHANGE:
1116 if (urb->actual_length < 1190 if (urb->actual_length < (sizeof(*event) +
1117 (sizeof(*event) + sizeof(struct connection_speed_change))) 1191 sizeof(struct usb_cdc_speed_change)))
1118 set_bit(EVENT_STS_SPLIT, &dev->flags); 1192 set_bit(EVENT_STS_SPLIT, &dev->flags);
1119 else 1193 else
1120 cdc_ncm_speed_change(ctx, 1194 cdc_ncm_speed_change(ctx,
1121 (struct connection_speed_change *) &event[1]); 1195 (struct usb_cdc_speed_change *) &event[1]);
1122 break; 1196 break;
1123 1197
1124 default: 1198 default:
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
406 406
407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
408 err("Firmware too big: %zu", fw->size); 408 err("Firmware too big: %zu", fw->size);
409 release_firmware(fw);
409 return -ENOSPC; 410 return -ENOSPC;
410 } 411 }
411 data_len = fw->size; 412 data_len = fw->size;
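
The one-line kaweth fix above releases the firmware blob on the oversize error path; previously that early return leaked the buffer obtained from request_firmware(). A self-contained userspace sketch of the same rule, freeing the resource on every exit path (the helper names are invented):

#include <stdio.h>
#include <stdlib.h>

#define FW_BUF_SIZE 4096

/* request_firmware()-style helper: the caller owns the returned buffer */
static unsigned char *load_blob(size_t *len)
{
	*len = 8192;                     /* pretend the file is too big */
	return calloc(1, *len);
}

static int download_firmware(void)
{
	size_t len;
	unsigned char *fw = load_blob(&len);

	if (!fw)
		return -1;

	if (len > FW_BUF_SIZE) {
		fprintf(stderr, "firmware too big: %zu\n", len);
		free(fw);    /* the fix: release the blob on this early exit too */
		return -1;
	}

	/* ... copy into the device buffer ... */
	free(fw);
	return 0;
}

int main(void)
{
	return download_firmware() ? 1 : 0;
}
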
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
446 } 446 }
447} 447}
448 448
449static void virtnet_napi_enable(struct virtnet_info *vi)
450{
451 napi_enable(&vi->napi);
452
453 /* If all buffers were filled by other side before we napi_enabled, we
454 * won't get another interrupt, so process any outstanding packets
 455 * now. virtnet_poll wants to re-enable the queue, so we disable here.
456 * We synchronize against interrupts via NAPI_STATE_SCHED */
457 if (napi_schedule_prep(&vi->napi)) {
458 virtqueue_disable_cb(vi->rvq);
459 __napi_schedule(&vi->napi);
460 }
461}
462
449static void refill_work(struct work_struct *work) 463static void refill_work(struct work_struct *work)
450{ 464{
451 struct virtnet_info *vi; 465 struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
454 vi = container_of(work, struct virtnet_info, refill.work); 468 vi = container_of(work, struct virtnet_info, refill.work);
455 napi_disable(&vi->napi); 469 napi_disable(&vi->napi);
456 still_empty = !try_fill_recv(vi, GFP_KERNEL); 470 still_empty = !try_fill_recv(vi, GFP_KERNEL);
457 napi_enable(&vi->napi); 471 virtnet_napi_enable(vi);
458 472
459 /* In theory, this can happen: if we don't get any buffers in 473 /* In theory, this can happen: if we don't get any buffers in
460 * we will *never* try to fill again. */ 474 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
638{ 652{
639 struct virtnet_info *vi = netdev_priv(dev); 653 struct virtnet_info *vi = netdev_priv(dev);
640 654
641 napi_enable(&vi->napi); 655 virtnet_napi_enable(vi);
642
643 /* If all buffers were filled by other side before we napi_enabled, we
644 * won't get another interrupt, so process any outstanding packets
645 * now. virtnet_poll wants to re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) {
648 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi);
650 }
651 return 0; 656 return 0;
652} 657}
653 658
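
The virtio_net change above factors the enable-then-poll-once sequence into virtnet_napi_enable(): if the host consumed every buffer while NAPI was disabled, no further interrupt will arrive, so the driver schedules one poll pass itself right after enabling. A rough userspace model of that idea (the struct and helpers are invented; the real driver synchronizes through napi_schedule_prep()/__napi_schedule()):

#include <stdio.h>
#include <stdbool.h>

struct rxq {
	int buffers_ready;     /* completed by the device while polling was off */
	bool poll_enabled;
};

static void poll_once(struct rxq *q)
{
	while (q->buffers_ready > 0) {
		q->buffers_ready--;
		printf("processed one rx buffer\n");
	}
}

/* enable polling, then run one pass ourselves: if the device finished all
 * buffers while polling was disabled, it will raise no new notification */
static void rx_poll_enable(struct rxq *q)
{
	q->poll_enabled = true;
	poll_once(q);
}

int main(void)
{
	struct rxq q = { .buffers_ready = 3, .poll_enabled = false };

	rx_poll_enable(&q);    /* drains the backlog even with no interrupt */
	return 0;
}
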
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5b..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
48static int enable_mq = 1; 48static int enable_mq = 1;
49static int irq_share_mode; 49static int irq_share_mode;
50 50
51static void
52vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
53
51/* 54/*
52 * Enable/Disable the given intr 55 * Enable/Disable the given intr
53 */ 56 */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
139{ 142{
140 u32 ret; 143 u32 ret;
141 int i; 144 int i;
145 unsigned long flags;
142 146
147 spin_lock_irqsave(&adapter->cmd_lock, flags);
143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 148 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 149 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
150 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
151
145 adapter->link_speed = ret >> 16; 152 adapter->link_speed = ret >> 16;
146 if (ret & 1) { /* Link is up. */ 153 if (ret & 1) { /* Link is up. */
147 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 154 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
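
The vmxnet3 hunks in this file repeatedly wrap the command-register write and the following result read in adapter->cmd_lock, so that two contexts issuing commands cannot interleave between the write and the read. The sketch below models that write-then-read critical section with a pthread mutex; the kernel uses spin_lock_irqsave() instead, and the struct here is invented for illustration.

#include <pthread.h>
#include <stdint.h>

struct vdev {
	pthread_mutex_t cmd_lock;
	uint32_t cmd_reg;          /* stands in for the BAR1 command register */
};

/* write a command, then read the result, atomically w.r.t. other callers */
static uint32_t issue_cmd(struct vdev *dev, uint32_t cmd)
{
	uint32_t ret;

	pthread_mutex_lock(&dev->cmd_lock);
	dev->cmd_reg = cmd;        /* like VMXNET3_WRITE_BAR1_REG(..., CMD, cmd) */
	ret = dev->cmd_reg;        /* like VMXNET3_READ_BAR1_REG(..., CMD) */
	pthread_mutex_unlock(&dev->cmd_lock);

	return ret;
}

int main(void)
{
	struct vdev dev = { .cmd_lock = PTHREAD_MUTEX_INITIALIZER, .cmd_reg = 0 };

	issue_cmd(&dev, 1);        /* e.g. a GET_LINK style query */
	return 0;
}
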
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
183 190
184 /* Check if there is an error on xmit/recv queues */ 191 /* Check if there is an error on xmit/recv queues */
185 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
193 spin_lock(&adapter->cmd_lock);
186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
187 VMXNET3_CMD_GET_QUEUE_STATUS); 195 VMXNET3_CMD_GET_QUEUE_STATUS);
196 spin_unlock(&adapter->cmd_lock);
188 197
189 for (i = 0; i < adapter->num_tx_queues; i++) 198 for (i = 0; i < adapter->num_tx_queues; i++)
190 if (adapter->tqd_start[i].status.stopped) 199 if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
804 skb_transport_header(skb))->doff * 4; 813 skb_transport_header(skb))->doff * 4;
805 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 814 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
806 } else { 815 } else {
807 unsigned int pull_size;
808
809 if (skb->ip_summed == CHECKSUM_PARTIAL) { 816 if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 817 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
811 818
812 if (ctx->ipv4) { 819 if (ctx->ipv4) {
813 struct iphdr *iph = (struct iphdr *) 820 struct iphdr *iph = (struct iphdr *)
814 skb_network_header(skb); 821 skb_network_header(skb);
815 if (iph->protocol == IPPROTO_TCP) { 822 if (iph->protocol == IPPROTO_TCP)
816 pull_size = ctx->eth_ip_hdr_size +
817 sizeof(struct tcphdr);
818
819 if (unlikely(!pskb_may_pull(skb,
820 pull_size))) {
821 goto err;
822 }
823 ctx->l4_hdr_size = ((struct tcphdr *) 823 ctx->l4_hdr_size = ((struct tcphdr *)
824 skb_transport_header(skb))->doff * 4; 824 skb_transport_header(skb))->doff * 4;
825 } else if (iph->protocol == IPPROTO_UDP) { 825 else if (iph->protocol == IPPROTO_UDP)
826 /*
827 * Use tcp header size so that bytes to
828 * be copied are more than required by
829 * the device.
830 */
826 ctx->l4_hdr_size = 831 ctx->l4_hdr_size =
827 sizeof(struct udphdr); 832 sizeof(struct tcphdr);
828 } else { 833 else
829 ctx->l4_hdr_size = 0; 834 ctx->l4_hdr_size = 0;
830 }
831 } else { 835 } else {
832 /* for simplicity, don't copy L4 headers */ 836 /* for simplicity, don't copy L4 headers */
833 ctx->l4_hdr_size = 0; 837 ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1859 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1863 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1860 struct Vmxnet3_DriverShared *shared = adapter->shared; 1864 struct Vmxnet3_DriverShared *shared = adapter->shared;
1861 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1865 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1866 unsigned long flags;
1862 1867
1863 if (grp) { 1868 if (grp) {
1864 /* add vlan rx stripping. */ 1869 /* add vlan rx stripping. */
1865 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { 1870 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1866 int i; 1871 int i;
1867 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1868 adapter->vlan_grp = grp; 1872 adapter->vlan_grp = grp;
1869 1873
1870 /* update FEATURES to device */
1871 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1873 VMXNET3_CMD_UPDATE_FEATURE);
1874 /* 1874 /*
1875 * Clear entire vfTable; then enable untagged pkts. 1875 * Clear entire vfTable; then enable untagged pkts.
1876 * Note: setting one entry in vfTable to non-zero turns 1876 * Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1880 vfTable[i] = 0; 1880 vfTable[i] = 0;
1881 1881
1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1883 spin_lock_irqsave(&adapter->cmd_lock, flags);
1883 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1884 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1884 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1885 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1886 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1885 } else { 1887 } else {
1886 printk(KERN_ERR "%s: vlan_rx_register when device has " 1888 printk(KERN_ERR "%s: vlan_rx_register when device has "
1887 "no NETIF_F_HW_VLAN_RX\n", netdev->name); 1889 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1900 */ 1902 */
1901 vfTable[i] = 0; 1903 vfTable[i] = 0;
1902 } 1904 }
1905 spin_lock_irqsave(&adapter->cmd_lock, flags);
1903 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1906 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1904 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1907 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1905 1908 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1906 /* update FEATURES to device */
1907 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1908 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1909 VMXNET3_CMD_UPDATE_FEATURE);
1910 } 1909 }
1911 } 1910 }
1912} 1911}
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1939{ 1938{
1940 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1939 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1941 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1940 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1941 unsigned long flags;
1942 1942
1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1944 spin_lock_irqsave(&adapter->cmd_lock, flags);
1944 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1945 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1946 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1947 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1946} 1948}
1947 1949
1948 1950
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1951{ 1953{
1952 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1954 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1953 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1955 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1956 unsigned long flags;
1954 1957
1955 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1958 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1959 spin_lock_irqsave(&adapter->cmd_lock, flags);
1956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1960 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1957 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1961 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1962 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1958} 1963}
1959 1964
1960 1965
@@ -1985,6 +1990,7 @@ static void
1985vmxnet3_set_mc(struct net_device *netdev) 1990vmxnet3_set_mc(struct net_device *netdev)
1986{ 1991{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1992 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1993 unsigned long flags;
1988 struct Vmxnet3_RxFilterConf *rxConf = 1994 struct Vmxnet3_RxFilterConf *rxConf =
1989 &adapter->shared->devRead.rxFilterConf; 1995 &adapter->shared->devRead.rxFilterConf;
1990 u8 *new_table = NULL; 1996 u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2020 rxConf->mfTablePA = 0; 2026 rxConf->mfTablePA = 0;
2021 } 2027 }
2022 2028
2029 spin_lock_irqsave(&adapter->cmd_lock, flags);
2023 if (new_mode != rxConf->rxMode) { 2030 if (new_mode != rxConf->rxMode) {
2024 rxConf->rxMode = cpu_to_le32(new_mode); 2031 rxConf->rxMode = cpu_to_le32(new_mode);
2025 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2032 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2028 2035
2029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2036 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2030 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2037 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2038 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2031 2039
2032 kfree(new_table); 2040 kfree(new_table);
2033} 2041}
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2080 devRead->misc.uptFeatures |= UPT1_F_LRO; 2088 devRead->misc.uptFeatures |= UPT1_F_LRO;
2081 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2089 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2082 } 2090 }
2083 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && 2091 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2084 adapter->vlan_grp) {
2085 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2092 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2086 }
2087 2093
2088 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2094 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2089 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2095 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2168 /* rx filter settings */ 2174 /* rx filter settings */
2169 devRead->rxFilterConf.rxMode = 0; 2175 devRead->rxFilterConf.rxMode = 0;
2170 vmxnet3_restore_vlan(adapter); 2176 vmxnet3_restore_vlan(adapter);
2177 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2178
2171 /* the rest are already zeroed */ 2179 /* the rest are already zeroed */
2172} 2180}
2173 2181
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2177{ 2185{
2178 int err, i; 2186 int err, i;
2179 u32 ret; 2187 u32 ret;
2188 unsigned long flags;
2180 2189
2181 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," 2190 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2182 " ring sizes %u %u %u\n", adapter->netdev->name, 2191 " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2206 adapter->shared_pa)); 2215 adapter->shared_pa));
2207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( 2216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2208 adapter->shared_pa)); 2217 adapter->shared_pa));
2218 spin_lock_irqsave(&adapter->cmd_lock, flags);
2209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2219 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2210 VMXNET3_CMD_ACTIVATE_DEV); 2220 VMXNET3_CMD_ACTIVATE_DEV);
2211 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2221 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2222 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2212 2223
2213 if (ret != 0) { 2224 if (ret != 0) {
2214 printk(KERN_ERR "Failed to activate dev %s: error %u\n", 2225 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
2255void 2266void
2256vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2267vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2257{ 2268{
2269 unsigned long flags;
2270 spin_lock_irqsave(&adapter->cmd_lock, flags);
2258 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 2271 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2272 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2259} 2273}
2260 2274
2261 2275
@@ -2263,12 +2277,15 @@ int
2263vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2277vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2264{ 2278{
2265 int i; 2279 int i;
2280 unsigned long flags;
2266 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2281 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2267 return 0; 2282 return 0;
2268 2283
2269 2284
2285 spin_lock_irqsave(&adapter->cmd_lock, flags);
2270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2271 VMXNET3_CMD_QUIESCE_DEV); 2287 VMXNET3_CMD_QUIESCE_DEV);
2288 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2272 vmxnet3_disable_all_intrs(adapter); 2289 vmxnet3_disable_all_intrs(adapter);
2273 2290
2274 for (i = 0; i < adapter->num_rx_queues; i++) 2291 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2426 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2443 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2427 ring0_size = adapter->rx_queue[0].rx_ring[0].size; 2444 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2428 ring0_size = (ring0_size + sz - 1) / sz * sz; 2445 ring0_size = (ring0_size + sz - 1) / sz * sz;
2429 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / 2446 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2430 sz * sz); 2447 sz * sz);
2431 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2448 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2432 comp_size = ring0_size + ring1_size; 2449 comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2695 break; 2712 break;
2696 } else { 2713 } else {
2697 /* If fails to enable required number of MSI-x vectors 2714 /* If fails to enable required number of MSI-x vectors
2698 * try enabling 3 of them. One each for rx, tx and event 2715 * try enabling minimum number of vectors required.
2699 */ 2716 */
2700 vectors = vector_threshold; 2717 vectors = vector_threshold;
2701 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" 2718 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2718 u32 cfg; 2735 u32 cfg;
2719 2736
2720 /* intr settings */ 2737 /* intr settings */
2738 spin_lock(&adapter->cmd_lock);
2721 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2739 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2722 VMXNET3_CMD_GET_CONF_INTR); 2740 VMXNET3_CMD_GET_CONF_INTR);
2723 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2741 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2742 spin_unlock(&adapter->cmd_lock);
2724 adapter->intr.type = cfg & 0x3; 2743 adapter->intr.type = cfg & 0x3;
2725 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2744 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2726 2745
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2755 */ 2774 */
2756 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2775 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2757 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2776 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2758 || adapter->num_rx_queues != 2) { 2777 || adapter->num_rx_queues != 1) {
2759 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2778 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2760 printk(KERN_ERR "Number of rx queues : 1\n"); 2779 printk(KERN_ERR "Number of rx queues : 1\n");
2761 adapter->num_rx_queues = 1; 2780 adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2905 adapter->netdev = netdev; 2924 adapter->netdev = netdev;
2906 adapter->pdev = pdev; 2925 adapter->pdev = pdev;
2907 2926
2927 spin_lock_init(&adapter->cmd_lock);
2908 adapter->shared = pci_alloc_consistent(adapter->pdev, 2928 adapter->shared = pci_alloc_consistent(adapter->pdev,
2909 sizeof(struct Vmxnet3_DriverShared), 2929 sizeof(struct Vmxnet3_DriverShared),
2910 &adapter->shared_pa); 2930 &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
3108 u8 *arpreq; 3128 u8 *arpreq;
3109 struct in_device *in_dev; 3129 struct in_device *in_dev;
3110 struct in_ifaddr *ifa; 3130 struct in_ifaddr *ifa;
3131 unsigned long flags;
3111 int i = 0; 3132 int i = 0;
3112 3133
3113 if (!netif_running(netdev)) 3134 if (!netif_running(netdev))
3114 return 0; 3135 return 0;
3115 3136
3137 for (i = 0; i < adapter->num_rx_queues; i++)
3138 napi_disable(&adapter->rx_queue[i].napi);
3139
3116 vmxnet3_disable_all_intrs(adapter); 3140 vmxnet3_disable_all_intrs(adapter);
3117 vmxnet3_free_irqs(adapter); 3141 vmxnet3_free_irqs(adapter);
3118 vmxnet3_free_intr_resources(adapter); 3142 vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
3188 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3212 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3189 pmConf)); 3213 pmConf));
3190 3214
3215 spin_lock_irqsave(&adapter->cmd_lock, flags);
3191 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3192 VMXNET3_CMD_UPDATE_PMCFG); 3217 VMXNET3_CMD_UPDATE_PMCFG);
3218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3193 3219
3194 pci_save_state(pdev); 3220 pci_save_state(pdev);
3195 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3221 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
3204static int 3230static int
3205vmxnet3_resume(struct device *device) 3231vmxnet3_resume(struct device *device)
3206{ 3232{
3207 int err; 3233 int err, i = 0;
3234 unsigned long flags;
3208 struct pci_dev *pdev = to_pci_dev(device); 3235 struct pci_dev *pdev = to_pci_dev(device);
3209 struct net_device *netdev = pci_get_drvdata(pdev); 3236 struct net_device *netdev = pci_get_drvdata(pdev);
3210 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3237 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
3232 3259
3233 pci_enable_wake(pdev, PCI_D0, 0); 3260 pci_enable_wake(pdev, PCI_D0, 0);
3234 3261
3262 spin_lock_irqsave(&adapter->cmd_lock, flags);
3235 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3263 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3236 VMXNET3_CMD_UPDATE_PMCFG); 3264 VMXNET3_CMD_UPDATE_PMCFG);
3265 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3237 vmxnet3_alloc_intr_resources(adapter); 3266 vmxnet3_alloc_intr_resources(adapter);
3238 vmxnet3_request_irqs(adapter); 3267 vmxnet3_request_irqs(adapter);
3268 for (i = 0; i < adapter->num_rx_queues; i++)
3269 napi_enable(&adapter->rx_queue[i].napi);
3239 vmxnet3_enable_all_intrs(adapter); 3270 vmxnet3_enable_all_intrs(adapter);
3240 3271
3241 return 0; 3272 return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) 45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{ 46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48 unsigned long flags;
48 49
49 if (adapter->rxcsum != val) { 50 if (adapter->rxcsum != val) {
50 adapter->rxcsum = val; 51 adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
56 adapter->shared->devRead.misc.uptFeatures &= 57 adapter->shared->devRead.misc.uptFeatures &=
57 ~UPT1_F_RXCSUM; 58 ~UPT1_F_RXCSUM;
58 59
60 spin_lock_irqsave(&adapter->cmd_lock, flags);
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE); 62 VMXNET3_CMD_UPDATE_FEATURE);
63 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
61 } 64 }
62 } 65 }
63 return 0; 66 return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
68static const struct vmxnet3_stat_desc 71static const struct vmxnet3_stat_desc
69vmxnet3_tq_dev_stats[] = { 72vmxnet3_tq_dev_stats[] = {
70 /* description, offset */ 73 /* description, offset */
71 { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, 74 { "Tx Queue#", 0 },
72 { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, 75 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
73 { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, 76 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
74 { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, 77 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
75 { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, 78 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
76 { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, 79 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
77 { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, 80 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
78 { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, 81 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
79 { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, 82 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
80 { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, 83 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
84 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
81}; 85};
82 86
83/* per tq stats maintained by the driver */ 87/* per tq stats maintained by the driver */
84static const struct vmxnet3_stat_desc 88static const struct vmxnet3_stat_desc
85vmxnet3_tq_driver_stats[] = { 89vmxnet3_tq_driver_stats[] = {
86 /* description, offset */ 90 /* description, offset */
87 {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, 91 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
88 drop_total) }, 92 drop_total) },
89 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, 93 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
90 drop_too_many_frags) }, 94 drop_too_many_frags) },
91 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 95 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
92 drop_oversized_hdr) }, 96 drop_oversized_hdr) },
93 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, 97 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
94 drop_hdr_inspect_err) }, 98 drop_hdr_inspect_err) },
95 { " tso", offsetof(struct vmxnet3_tq_driver_stats, 99 { " tso", offsetof(struct vmxnet3_tq_driver_stats,
96 drop_tso) }, 100 drop_tso) },
97 { "ring full", offsetof(struct vmxnet3_tq_driver_stats, 101 { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
98 tx_ring_full) }, 102 tx_ring_full) },
99 { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, 103 { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
100 linearized) }, 104 linearized) },
101 { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, 105 { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
102 copy_skb_header) }, 106 copy_skb_header) },
103 { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 107 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
104 oversized_hdr) }, 108 oversized_hdr) },
105}; 109};
106 110
107/* per rq stats maintained by the device */ 111/* per rq stats maintained by the device */
108static const struct vmxnet3_stat_desc 112static const struct vmxnet3_stat_desc
109vmxnet3_rq_dev_stats[] = { 113vmxnet3_rq_dev_stats[] = {
110 { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, 114 { "Rx Queue#", 0 },
111 { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, 115 { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
112 { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, 116 { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
113 { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, 117 { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
114 { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, 118 { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
115 { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, 119 { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
116 { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, 120 { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
117 { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, 121 { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
118 { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, 122 { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
119 { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, 123 { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
124 { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
120}; 125};
121 126
122/* per rq stats maintained by the driver */ 127/* per rq stats maintained by the driver */
123static const struct vmxnet3_stat_desc 128static const struct vmxnet3_stat_desc
124vmxnet3_rq_driver_stats[] = { 129vmxnet3_rq_driver_stats[] = {
125 /* description, offset */ 130 /* description, offset */
126 { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, 131 { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
127 drop_total) }, 132 drop_total) },
128 { " err", offsetof(struct vmxnet3_rq_driver_stats, 133 { " err", offsetof(struct vmxnet3_rq_driver_stats,
129 drop_err) }, 134 drop_err) },
130 { " fcs", offsetof(struct vmxnet3_rq_driver_stats, 135 { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
131 drop_fcs) }, 136 drop_fcs) },
132 { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, 137 { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
133 rx_buf_alloc_failure) }, 138 rx_buf_alloc_failure) },
134}; 139};
135 140
136/* global stats maintained by the driver */ 141/* global stats maintained by the driver */
137static const struct vmxnet3_stat_desc 142static const struct vmxnet3_stat_desc
138vmxnet3_global_stats[] = { 143vmxnet3_global_stats[] = {
139 /* description, offset */ 144 /* description, offset */
140 { "tx timeout count", offsetof(struct vmxnet3_adapter, 145 { "tx timeout count", offsetof(struct vmxnet3_adapter,
141 tx_timeout_count) } 146 tx_timeout_count) }
142}; 147};
143 148
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 156 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 157 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 158 struct net_device_stats *net_stats = &netdev->stats;
159 unsigned long flags;
154 int i; 160 int i;
155 161
156 adapter = netdev_priv(netdev); 162 adapter = netdev_priv(netdev);
157 163
158 /* Collect the dev stats into the shared area */ 164 /* Collect the dev stats into the shared area */
165 spin_lock_irqsave(&adapter->cmd_lock, flags);
159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 166 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
167 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
160 168
161 memset(net_stats, 0, sizeof(*net_stats)); 169 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) { 170 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
193static int 201static int
194vmxnet3_get_sset_count(struct net_device *netdev, int sset) 202vmxnet3_get_sset_count(struct net_device *netdev, int sset)
195{ 203{
204 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
196 switch (sset) { 205 switch (sset) {
197 case ETH_SS_STATS: 206 case ETH_SS_STATS:
198 return ARRAY_SIZE(vmxnet3_tq_dev_stats) + 207 return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
199 ARRAY_SIZE(vmxnet3_tq_driver_stats) + 208 ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
200 ARRAY_SIZE(vmxnet3_rq_dev_stats) + 209 adapter->num_tx_queues +
201 ARRAY_SIZE(vmxnet3_rq_driver_stats) + 210 (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
211 ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
212 adapter->num_rx_queues +
202 ARRAY_SIZE(vmxnet3_global_stats); 213 ARRAY_SIZE(vmxnet3_global_stats);
203 default: 214 default:
204 return -EOPNOTSUPP; 215 return -EOPNOTSUPP;
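
With per-queue statistics, the string and stat count above scales with the number of queues: the per-tx-queue arrays are counted once per tx queue, the per-rx-queue arrays once per rx queue, plus the global entries. The arithmetic, with assumed array sizes as placeholders for the real ARRAY_SIZE() values:

#include <stdio.h>

/* assumed array sizes, placeholders for ARRAY_SIZE(vmxnet3_*_stats) */
#define N_TQ_DEV   10
#define N_TQ_DRV    9
#define N_RQ_DEV   10
#define N_RQ_DRV    4
#define N_GLOBAL    1

static int stat_count(int num_tx_queues, int num_rx_queues)
{
	return (N_TQ_DEV + N_TQ_DRV) * num_tx_queues +
	       (N_RQ_DEV + N_RQ_DRV) * num_rx_queues +
	       N_GLOBAL;
}

int main(void)
{
	printf("%d stat entries for 4 tx / 4 rx queues\n", stat_count(4, 4));
	return 0;
}
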
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
206} 217}
207 218
208 219
220/* Should be multiple of 4 */
221#define NUM_TX_REGS 8
222#define NUM_RX_REGS 12
223
209static int 224static int
210vmxnet3_get_regs_len(struct net_device *netdev) 225vmxnet3_get_regs_len(struct net_device *netdev)
211{ 226{
212 return 20 * sizeof(u32); 227 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
228 return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
229 adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
213} 230}
214 231
215 232
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
240static void 257static void
241vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) 258vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
242{ 259{
260 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
243 if (stringset == ETH_SS_STATS) { 261 if (stringset == ETH_SS_STATS) {
244 int i; 262 int i, j;
245 263 for (j = 0; j < adapter->num_tx_queues; j++) {
246 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { 264 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
247 memcpy(buf, vmxnet3_tq_dev_stats[i].desc, 265 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
248 ETH_GSTRING_LEN); 266 ETH_GSTRING_LEN);
249 buf += ETH_GSTRING_LEN; 267 buf += ETH_GSTRING_LEN;
250 } 268 }
251 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { 269 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
252 memcpy(buf, vmxnet3_tq_driver_stats[i].desc, 270 i++) {
253 ETH_GSTRING_LEN); 271 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
254 buf += ETH_GSTRING_LEN; 272 ETH_GSTRING_LEN);
255 } 273 buf += ETH_GSTRING_LEN;
256 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { 274 }
257 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
258 ETH_GSTRING_LEN);
259 buf += ETH_GSTRING_LEN;
260 } 275 }
261 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { 276
262 memcpy(buf, vmxnet3_rq_driver_stats[i].desc, 277 for (j = 0; j < adapter->num_rx_queues; j++) {
263 ETH_GSTRING_LEN); 278 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
264 buf += ETH_GSTRING_LEN; 279 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
280 ETH_GSTRING_LEN);
281 buf += ETH_GSTRING_LEN;
282 }
283 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
284 i++) {
285 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
286 ETH_GSTRING_LEN);
287 buf += ETH_GSTRING_LEN;
288 }
265 } 289 }
290
266 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { 291 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
267 memcpy(buf, vmxnet3_global_stats[i].desc, 292 memcpy(buf, vmxnet3_global_stats[i].desc,
268 ETH_GSTRING_LEN); 293 ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
277 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 302 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
278 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; 303 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
279 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 unsigned long flags;
280 306
281 if (data & ~ETH_FLAG_LRO) 307 if (data & ~ETH_FLAG_LRO)
282 return -EOPNOTSUPP; 308 return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
292 else 318 else
293 adapter->shared->devRead.misc.uptFeatures &= 319 adapter->shared->devRead.misc.uptFeatures &=
294 ~UPT1_F_LRO; 320 ~UPT1_F_LRO;
321 spin_lock_irqsave(&adapter->cmd_lock, flags);
295 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 322 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
296 VMXNET3_CMD_UPDATE_FEATURE); 323 VMXNET3_CMD_UPDATE_FEATURE);
324 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
297 } 325 }
298 return 0; 326 return 0;
299} 327}
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
303 struct ethtool_stats *stats, u64 *buf) 331 struct ethtool_stats *stats, u64 *buf)
304{ 332{
305 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 333 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
334 unsigned long flags;
306 u8 *base; 335 u8 *base;
307 int i; 336 int i;
308 int j = 0; 337 int j = 0;
309 338
339 spin_lock_irqsave(&adapter->cmd_lock, flags);
310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 340 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
341 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
311 342
312 /* this does assume each counter is 64-bit wide */ 343 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */ 344 for (j = 0; j < adapter->num_tx_queues; j++) {
314 345 base = (u8 *)&adapter->tqd_start[j].stats;
315 base = (u8 *)&adapter->tqd_start[j].stats; 346 *buf++ = (u64)j;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 347 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 348 *buf++ = *(u64 *)(base +
318 349 vmxnet3_tq_dev_stats[i].offset);
319 base = (u8 *)&adapter->tx_queue[j].stats; 350
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 351 base = (u8 *)&adapter->tx_queue[j].stats;
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 352 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
322 353 *buf++ = *(u64 *)(base +
323 base = (u8 *)&adapter->rqd_start[j].stats; 354 vmxnet3_tq_driver_stats[i].offset);
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 355 }
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 356
327 base = (u8 *)&adapter->rx_queue[j].stats; 357 for (j = 0; j < adapter->num_tx_queues; j++) {
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 358 base = (u8 *)&adapter->rqd_start[j].stats;
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 359 *buf++ = (u64) j;
360 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
361 *buf++ = *(u64 *)(base +
362 vmxnet3_rq_dev_stats[i].offset);
363
364 base = (u8 *)&adapter->rx_queue[j].stats;
365 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
366 *buf++ = *(u64 *)(base +
367 vmxnet3_rq_driver_stats[i].offset);
368 }
330 369
331 base = (u8 *)adapter; 370 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 371 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 378{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 379 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 380 u32 *buf = p;
342 int i = 0; 381 int i = 0, j = 0;
343 382
344 memset(p, 0, vmxnet3_get_regs_len(netdev)); 383 memset(p, 0, vmxnet3_get_regs_len(netdev));
345 384
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
348 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 387 /* Update vmxnet3_get_regs_len if we want to dump more registers */
349 388
350 /* make each ring use multiple of 16 bytes */ 389 /* make each ring use multiple of 16 bytes */
351/* TODO change this for multiple queues */ 390 for (i = 0; i < adapter->num_tx_queues; i++) {
352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill; 391 buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp; 392 buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen; 393 buf[j++] = adapter->tx_queue[i].tx_ring.gen;
355 buf[3] = 0; 394 buf[j++] = 0;
356 395
357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc; 396 buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
358 buf[5] = adapter->tx_queue[i].comp_ring.gen; 397 buf[j++] = adapter->tx_queue[i].comp_ring.gen;
359 buf[6] = adapter->tx_queue[i].stopped; 398 buf[j++] = adapter->tx_queue[i].stopped;
360 buf[7] = 0; 399 buf[j++] = 0;
361 400 }
362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; 401
363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; 402 for (i = 0; i < adapter->num_rx_queues; i++) {
364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen; 403 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
365 buf[11] = 0; 404 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
366 405 buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; 406 buf[j++] = 0;
368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; 407
369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen; 408 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
370 buf[15] = 0; 409 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
371 410 buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc; 411 buf[j++] = 0;
373 buf[17] = adapter->rx_queue[i].comp_ring.gen; 412
374 buf[18] = 0; 413 buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
375 buf[19] = 0; 414 buf[j++] = adapter->rx_queue[i].comp_ring.gen;
415 buf[j++] = 0;
416 buf[j++] = 0;
417 }
418
376} 419}
377 420
378 421
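
The two register-dump hunks are meant to stay consistent: vmxnet3_get_regs_len() now advertises NUM_TX_REGS u32 words per tx queue plus NUM_RX_REGS per rx queue, and vmxnet3_get_regs() must emit exactly that many words as it walks the queues. A small check of that invariant (the register contents here are dummies):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_REGS 8
#define NUM_RX_REGS 12

static size_t regs_len(int ntx, int nrx)
{
	return (size_t)(ntx * NUM_TX_REGS + nrx * NUM_RX_REGS) * sizeof(uint32_t);
}

static size_t fill_regs(uint32_t *buf, int ntx, int nrx)
{
	size_t j = 0;
	int i, k;

	for (i = 0; i < ntx; i++)
		for (k = 0; k < NUM_TX_REGS; k++)
			buf[j++] = 0;      /* ring indices, gen bits, padding */

	for (i = 0; i < nrx; i++)
		for (k = 0; k < NUM_RX_REGS; k++)
			buf[j++] = 0;

	return j * sizeof(uint32_t);
}

int main(void)
{
	uint32_t buf[4 * NUM_TX_REGS + 4 * NUM_RX_REGS];

	/* whatever get_regs() writes must match what get_regs_len() promised */
	assert(fill_regs(buf, 4, 4) == regs_len(4, 4));
	printf("regs dump is %zu bytes for 4 tx / 4 rx queues\n", regs_len(4, 4));
	return 0;
}
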
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p) 617 const struct ethtool_rxfh_indir *p)
575{ 618{
576 unsigned int i; 619 unsigned int i;
620 unsigned long flags;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 622 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579 623
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
592 for (i = 0; i < rssConf->indTableSize; i++) 636 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i]; 637 rssConf->indTable[i] = p->ring_index[i];
594 638
639 spin_lock_irqsave(&adapter->cmd_lock, flags);
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 640 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT); 641 VMXNET3_CMD_UPDATE_RSSIDT);
642 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
597 643
598 return 0; 644 return 0;
599 645
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f03..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
72 72
73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
75 75
76#if defined(CONFIG_PCI_MSI) 76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 77 /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
289 289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ 290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1) 291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ 292#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
293 293
294 294
295struct vmxnet3_intr { 295struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
318 struct vlan_group *vlan_grp; 318 struct vlan_group *vlan_grp;
319 struct vmxnet3_intr intr; 319 struct vmxnet3_intr intr;
320 spinlock_t cmd_lock;
320 struct Vmxnet3_DriverShared *shared; 321 struct Vmxnet3_DriverShared *shared;
321 struct Vmxnet3_PMConf *pm_conf; 322 struct Vmxnet3_PMConf *pm_conf;
322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ 323 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..228d4f7a58af 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3690 if (status != VXGE_HW_OK) 3690 if (status != VXGE_HW_OK)
3691 goto exit; 3691 goto exit;
3692 3692
3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3694 (rts_table != 3694 (rts_table !=
3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) 3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3696 *data1 = 0; 3696 *data1 = 0;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b568f1b0..c81a6512c683 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4120 "hotplug event.\n"); 4120 "hotplug event.\n");
4121 4121
4122out: 4122out:
4123 release_firmware(fw);
4123 return ret; 4124 return ret;
4124} 4125}
4125 4126
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 019a74d533a6..09ae4ef0fd51 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2294,6 +2294,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2294 int i; 2294 int i;
2295 bool needreset = false; 2295 bool needreset = false;
2296 2296
2297 mutex_lock(&sc->lock);
2298
2297 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { 2299 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
2298 if (sc->txqs[i].setup) { 2300 if (sc->txqs[i].setup) {
2299 txq = &sc->txqs[i]; 2301 txq = &sc->txqs[i];
@@ -2321,6 +2323,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2321 ath5k_reset(sc, NULL, true); 2323 ath5k_reset(sc, NULL, true);
2322 } 2324 }
2323 2325
2326 mutex_unlock(&sc->lock);
2327
2324 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2328 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2325 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2329 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2326} 2330}
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0064be7ce5c9..21091c26a9a5 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
838 for (i = 0; i < qmax; i++) { 838 for (i = 0; i < qmax; i++) {
839 err = ath5k_hw_stop_tx_dma(ah, i); 839 err = ath5k_hw_stop_tx_dma(ah, i);
840 /* -EINVAL -> queue inactive */ 840 /* -EINVAL -> queue inactive */
841 if (err != -EINVAL) 841 if (err && err != -EINVAL)
842 return err; 842 return err;
843 } 843 }
844 844
845 return err; 845 return 0;
846} 846}
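
The ath5k_hw_dma_stop() fix above changes two things: a per-queue -EINVAL (queue simply inactive) no longer aborts the loop, and the function returns 0 on success instead of whatever the last queue reported. A standalone sketch of that loop shape, with an invented per-queue stop helper:

#include <errno.h>
#include <stdio.h>

#define NUM_TX_QUEUES 10

/* pretend per-queue stop; returns -EINVAL for queues that are not set up */
static int stop_tx_queue(int q)
{
	if (q & 1)
		return -EINVAL;   /* inactive queue: not an error for the caller */
	return 0;
}

static int stop_all_tx(void)
{
	int q, err;

	for (q = 0; q < NUM_TX_QUEUES; q++) {
		err = stop_tx_queue(q);
		/* only real failures abort the loop; -EINVAL means "inactive" */
		if (err && err != -EINVAL)
			return err;
	}
	return 0;   /* success, even if the last queue reported -EINVAL */
}

int main(void)
{
	printf("stop_all_tx -> %d\n", stop_all_tx());
	return 0;
}
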
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index e5f2b96a4c63..a702817daf72 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
86 if (!ah->ah_bwmode) { 86 if (!ah->ah_bwmode) {
87 dur = ieee80211_generic_frame_duration(sc->hw, 87 dur = ieee80211_generic_frame_duration(sc->hw,
88 NULL, len, rate); 88 NULL, len, rate);
89 return dur; 89 return le16_to_cpu(dur);
90 } 90 }
91 91
92 bitrate = rate->bitrate; 92 bitrate = rate->bitrate;
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
265 * what rate we should choose to TX ACKs. */ 265 * what rate we should choose to TX ACKs. */
266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
267 267
268 tx_time = le16_to_cpu(tx_time);
269
270 ath5k_hw_reg_write(ah, tx_time, reg); 268 ath5k_hw_reg_write(ah, tx_time, reg);
271 269
272 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 270 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
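
The pcu.c hunks above move the byte-order conversion into ath5k_hw_get_frame_duration() itself: ieee80211_generic_frame_duration() returns a little-endian value, so the helper now returns le16_to_cpu(dur) and the caller in ath5k_hw_write_rate_duration() drops its own conversion, giving every caller host byte order. For reference, the portable equivalent of le16_to_cpu() assembles the value from the two little-endian bytes:

#include <stdint.h>
#include <stdio.h>

/* portable stand-in for le16_to_cpu(): build the value from the two
 * little-endian bytes so the result is right on any host byte order */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
	uint8_t wire[2] = { 0x2c, 0x01 };     /* 0x012c == 300 in LE order */

	printf("%u\n", le16_to_host(wire));   /* prints 300 everywhere */
	return 0;
}
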
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa13e36..5e300bd3d264 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -679,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
 
 	/* Do NF cal only at longer intervals */
 	if (longcal || nfcal_pending) {
-		/* Do periodic PAOffset Cal */
-		ar9002_hw_pa_cal(ah, false);
-		ar9002_hw_olc_temp_compensation(ah);
-
 		/*
 		 * Get the value from the previous NF cal and update
 		 * history buffer.
@@ -697,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
 			ath9k_hw_loadnf(ah, ah->curchan);
 		}
 
-		if (longcal)
+		if (longcal) {
 			ath9k_hw_start_nfcal(ah, false);
+			/* Do periodic PAOffset Cal */
+			ar9002_hw_pa_cal(ah, false);
+			ar9002_hw_olc_temp_compensation(ah);
+		}
 	}
 
 	return iscaldone;
@@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
 				&adc_dc_cal_multi_sample;
 		}
 		ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+		if (AR_SREV_9287(ah))
+			ah->supp_cals &= ~ADC_GAIN_CAL;
 	}
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f8a7771faee2..f44c84ab5dce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
 	}
 
 	/* WAR for ASPM system hang */
-	if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+	if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
 		val |= (AR_WA_BIT6 | AR_WA_BIT7);
-	}
 
 	if (AR_SREV_9285E_20(ah))
 		val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 81f9cf294dec..9ecca93392e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = {
 
 static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
 	/* Addr allmodes */
-	{0x00004040, 0x08212e5e},
+	{0x00004040, 0x0821265e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6137634e46ca..06fb2c850535 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -146,8 +146,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 	/* Sleep Setting */
 
 	INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-			ar9300PciePhy_clkreq_enable_L1_2p2,
-			ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
+			ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+			ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
 			2);
 
 	/* Fast clock modal settings */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf54282..23838e37d45f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -218,6 +218,7 @@ struct ath_frame_info {
 struct ath_buf_state {
 	u8 bf_type;
 	u8 bfs_paprd;
+	unsigned long bfs_paprd_timestamp;
 	enum ath9k_internal_frame_type bfs_ftype;
 };
 
@@ -593,7 +594,6 @@ struct ath_softc {
 	struct work_struct paprd_work;
 	struct work_struct hw_check_work;
 	struct completion paprd_complete;
-	bool paprd_pending;
 
 	u32 intrstatus;
 	u32 sc_flags; /* SC_OP_* */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141f2006..749a93608664 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 	    eep->baseEepHeader.pwdclkind == 0)
 		ah->need_an_top2_fixup = 1;
 
+	if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+	    (AR_SREV_9280(ah)))
+		eep->modalHeader[0].xpaBiasLvl = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e87ed3..780ac5eac501 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -78,7 +78,7 @@ struct tx_frame_hdr {
 	u8 node_idx;
 	u8 vif_idx;
 	u8 tidno;
-	u32 flags; /* ATH9K_HTC_TX_* */
+	__be32 flags; /* ATH9K_HTC_TX_* */
 	u8 key_type;
 	u8 keyix;
 	u8 reserved[26];
@@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
 void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 			enum htc_endpoint_id ep_id, bool txok);
 
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
 void ath9k_ani_work(struct work_struct *work);;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 38433f9bfe59..0352f0994caa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
 {
 	ath9k_htc_exit_debug(priv->ah);
 	ath9k_hw_deinit(priv->ah);
-	tasklet_kill(&priv->swba_tasklet);
-	tasklet_kill(&priv->rx_tasklet);
-	tasklet_kill(&priv->tx_tasklet);
 	kfree(priv->ah);
 	priv->ah = NULL;
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c938d16..6bb59958f71e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
 
 	priv->nstations++;
 
+	/*
+	 * Set chainmask etc. on the target.
+	 */
+	ret = ath9k_htc_update_cap_target(priv);
+	if (ret)
+		ath_dbg(common, ATH_DBG_CONFIG,
+			"Failed to update capability in target\n");
+
+	priv->ah->is_monitoring = true;
+
 	return 0;
 
 err_vif:
@@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 	}
 
 	priv->nstations--;
+	priv->ah->is_monitoring = false;
 
 	return 0;
 }
@@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
 	return 0;
 }
 
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
 {
 	struct ath9k_htc_cap_target tcap;
 	int ret;
@@ -1014,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 	int ret = 0;
 	u8 cmd_rsp;
 
-	/* Cancel all the running timers/work .. */
-	cancel_work_sync(&priv->fatal_work);
-	cancel_work_sync(&priv->ps_work);
-	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
-	ath9k_led_stop_brightness(priv);
-
 	mutex_lock(&priv->mutex);
 
 	if (priv->op_flags & OP_INVALID) {
@@ -1033,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
 	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
 	WMI_CMD(WMI_STOP_RECV_CMDID);
+
+	tasklet_kill(&priv->swba_tasklet);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+
 	skb_queue_purge(&priv->tx_queue);
 
+	mutex_unlock(&priv->mutex);
+
+	/* Cancel all the running timers/work .. */
+	cancel_work_sync(&priv->fatal_work);
+	cancel_work_sync(&priv->ps_work);
+	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+	ath9k_led_stop_brightness(priv);
+
+	mutex_lock(&priv->mutex);
+
 	/* Remove monitor interface here */
 	if (ah->opmode == NL80211_IFTYPE_MONITOR) {
 		if (ath9k_htc_remove_monitor_interface(priv))
@@ -1186,6 +1206,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 		}
 	}
 
+	/*
+	 * Monitor interface should be added before
+	 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+	 */
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (conf->flags & IEEE80211_CONF_MONITOR) {
+			if (ath9k_htc_add_monitor_interface(priv))
+				ath_err(common, "Failed to set monitor mode\n");
+			else
+				ath_dbg(common, ATH_DBG_CONFIG,
+					"HW opmode set to Monitor mode\n");
+		}
+	}
+
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
@@ -1221,16 +1255,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 		ath_update_txpow(priv);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-		if (conf->flags & IEEE80211_CONF_MONITOR) {
-			if (ath9k_htc_add_monitor_interface(priv))
-				ath_err(common, "Failed to set monitor mode\n");
-			else
-				ath_dbg(common, ATH_DBG_CONFIG,
-					"HW opmode set to Monitor mode\n");
-		}
-	}
-
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 		mutex_lock(&priv->htc_pm_lock);
 		if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 33f36029fa4f..7a5ffca21958 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -113,6 +113,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
 
 	if (ieee80211_is_data(fc)) {
 		struct tx_frame_hdr tx_hdr;
+		u32 flags = 0;
 		u8 *qc;
 
 		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -136,13 +137,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
 		/* Check for RTS protection */
 		if (priv->hw->wiphy->rts_threshold != (u32) -1)
 			if (skb->len > priv->hw->wiphy->rts_threshold)
-				tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+				flags |= ATH9K_HTC_TX_RTSCTS;
 
 		/* CTS-to-self */
-		if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+		if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
 		    (priv->op_flags & OP_PROTECT_ENABLE))
-			tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+			flags |= ATH9K_HTC_TX_CTSONLY;
 
+		tx_hdr.flags = cpu_to_be32(flags);
 		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
 		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
 			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde978665e07..9f01e50d5cda 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -369,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 	else
 		ah->config.ht_enable = 0;
 
+	/* PAPRD needs some more work to be enabled */
+	ah->config.paprd_disable = 1;
+
 	ah->config.rx_intr_mitigation = true;
 	ah->config.pcieSerDesWrite = true;
 
@@ -436,9 +439,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
 
 static int ath9k_hw_post_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int ecode;
 
-	if (!AR_SREV_9271(ah)) {
+	if (common->bus_ops->ath_bus_type != ATH_USB) {
 		if (!ath9k_hw_chip_test(ah))
 			return -ENODEV;
 	}
@@ -1213,7 +1217,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	ah->txchainmask = common->tx_chainmask;
 	ah->rxchainmask = common->rx_chainmask;
 
-	if (!ah->chip_fullsleep) {
+	if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
 		ath9k_hw_abortpcurecv(ah);
 		if (!ath9k_hw_stopdmarecv(ah)) {
 			ath_dbg(common, ATH_DBG_XMIT,
@@ -1932,7 +1936,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 		pCap->rx_status_len = sizeof(struct ar9003_rxs);
 		pCap->tx_desc_len = sizeof(struct ar9003_txc);
 		pCap->txs_len = sizeof(struct ar9003_txs);
-		if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+		if (!ah->config.paprd_disable &&
+		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
 			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
 	} else {
 		pCap->tx_desc_len = sizeof(struct ath_desc);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 5a3dfec45e96..ea9fde670646 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -225,6 +225,7 @@ struct ath9k_ops_config {
 	u32 pcie_waen;
 	u8 analog_shiftreg;
 	u8 ht_enable;
+	u8 paprd_disable;
 	u32 ofdm_trig_low;
 	u32 ofdm_trig_high;
 	u32 cck_trig_high;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 767d8b86f1e1..087a6a95edd5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -598,8 +598,6 @@ err_btcoex:
 err_queues:
 	ath9k_hw_deinit(ah);
 err_hw:
-	tasklet_kill(&sc->intr_tq);
-	tasklet_kill(&sc->bcon_tasklet);
 
 	kfree(ah);
 	sc->sc_ah = NULL;
@@ -807,9 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 
 	ath9k_hw_deinit(sc->sc_ah);
 
-	tasklet_kill(&sc->intr_tq);
-	tasklet_kill(&sc->bcon_tasklet);
-
 	kfree(sc->sc_ah);
 	sc->sc_ah = NULL;
 }
@@ -824,6 +819,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
 	ath_deinit_leds(sc);
 
+	ath9k_ps_restore(sc);
+
 	for (i = 0; i < sc->num_sec_wiphy; i++) {
 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
 		if (aphy == NULL)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f90a6ca94a76..da5c64597c1f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
 {
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_tx_control txctl;
 	int time_left;
 
@@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
 	tx_info->control.rates[1].idx = -1;
 
 	init_completion(&sc->paprd_complete);
-	sc->paprd_pending = true;
 	txctl.paprd = BIT(chain);
-	if (ath_tx_start(hw, skb, &txctl) != 0)
+
+	if (ath_tx_start(hw, skb, &txctl) != 0) {
+		ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+		dev_kfree_skb_any(skb);
 		return false;
+	}
 
 	time_left = wait_for_completion_timeout(&sc->paprd_complete,
 			msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-	sc->paprd_pending = false;
 
 	if (!time_left)
 		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -592,14 +596,12 @@ void ath9k_tasklet(unsigned long data)
 	u32 status = sc->intrstatus;
 	u32 rxmask;
 
-	ath9k_ps_wakeup(sc);
-
 	if (status & ATH9K_INT_FATAL) {
 		ath_reset(sc, true);
-		ath9k_ps_restore(sc);
 		return;
 	}
 
+	ath9k_ps_wakeup(sc);
 	spin_lock(&sc->sc_pcu_lock);
 
 	if (!ath9k_hw_check_alive(ah))
@@ -955,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 
 	spin_unlock_bh(&sc->sc_pcu_lock);
 	ath9k_ps_restore(sc);
-
-	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
 int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -969,6 +969,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 	/* Stop ANI */
 	del_timer_sync(&common->ani.timer);
 
+	ath9k_ps_wakeup(sc);
 	spin_lock_bh(&sc->sc_pcu_lock);
 
 	ieee80211_stop_queues(hw);
@@ -1015,6 +1016,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 
 	/* Start ANI */
 	ath_start_ani(common);
+	ath9k_ps_restore(sc);
 
 	return r;
 }
@@ -1309,6 +1311,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
 	spin_lock_bh(&sc->sc_pcu_lock);
 
+	/* prevent tasklets to enable interrupts once we disable them */
+	ah->imask &= ~ATH9K_INT_GLOBAL;
+
 	/* make sure h/w will not generate any interrupt
 	 * before setting the invalid flag. */
 	ath9k_hw_disable_interrupts(ah);
@@ -1326,6 +1331,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
+	/* we can now sync irq and kill any running tasklets, since we already
+	 * disabled interrupts and not holding a spin lock */
+	synchronize_irq(sc->irq);
+	tasklet_kill(&sc->intr_tq);
+	tasklet_kill(&sc->bcon_tasklet);
+
 	ath9k_ps_restore(sc);
 
 	sc->ps_idle = true;
@@ -1701,7 +1712,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 skip_chan_change:
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 		sc->config.txpowlimit = 2 * conf->power_level;
+		ath9k_ps_wakeup(sc);
 		ath_update_txpow(sc);
+		ath9k_ps_restore(sc);
 	}
 
 	spin_lock_bh(&sc->wiphy_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 332d1feb5c18..07b7804aec5b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
 						   bf->bf_state.bfs_paprd);
 
+		if (txctl->paprd)
+			bf->bf_state.bfs_paprd_timestamp = jiffies;
+
 		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
 	}
 
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 	bf->bf_buf_addr = 0;
 
 	if (bf->bf_state.bfs_paprd) {
-		if (!sc->paprd_pending)
+		if (time_after(jiffies,
+				bf->bf_state.bfs_paprd_timestamp +
+				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
 			dev_kfree_skb_any(skb);
 		else
 			complete(&sc->paprd_complete);
@@ -2113,9 +2118,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
 	if (needreset) {
 		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
 			"tx hung, resetting the chip\n");
-		ath9k_ps_wakeup(sc);
 		ath_reset(sc, true);
-		ath9k_ps_restore(sc);
 	}
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
 	cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
 
 	/* 2. Maybe the AP wants to send multicast/broadcast data? */
-	cam = !!(tim_ie->bitmap_ctrl & 0x01);
+	cam |= !!(tim_ie->bitmap_ctrl & 0x01);
 
 	if (!cam) {
 		/* back to low-power land. */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a4134edeb..2176edede39b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
 	hw_priv->link = link;
 
 	/*
-	 * Make sure the IRQ handler cannot proceed until at least
-	 * dev->base_addr is initialized.
+	 * We enable IRQ here, but IRQ handler will not proceed
+	 * until dev->base_addr is set below. This protect us from
+	 * receive interrupts when driver is not initialized.
 	 */
-	spin_lock_irqsave(&local->irq_init_lock, flags);
-
 	ret = pcmcia_request_irq(link, prism2_interrupt);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
 	ret = pcmcia_enable_device(link);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
+	spin_lock_irqsave(&local->irq_init_lock, flags);
 	dev->irq = link->irq;
 	dev->base_addr = link->resource[0]->start;
-
 	spin_unlock_irqrestore(&local->irq_init_lock, flags);
 
 	local->shutdown = 0;
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
 
 	return ret;
 
- failed_unlock:
-	spin_unlock_irqrestore(&local->irq_init_lock, flags);
  failed:
 	kfree(hw_priv);
 	prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f6f46f..ae438ed80c2f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
 
 	inta = ipw_read32(priv, IPW_INTA_RW);
 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+		/* Only handle the cached INTA values */
+		inta = 0;
+	}
 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
 
 	/* Add any cached INTA values that need to be handled */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3f1e5f1bf847..91a9f5253469 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2624,6 +2624,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
 	.fw_name_pre = IWL4965_FW_PRE,
 	.ucode_api_max = IWL4965_UCODE_API_MAX,
 	.ucode_api_min = IWL4965_UCODE_API_MIN,
+	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_ABC,
 	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bcd7ae0..ef36aff1bb43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
 	.fw_name_pre = IWL6050_FW_PRE, \
 	.ucode_api_max = IWL6050_UCODE_API_MAX, \
 	.ucode_api_min = IWL6050_UCODE_API_MIN, \
+	.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
+	.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
 	.ops = &iwl6050_ops, \
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 97906dd442e6..27b5a3eec9dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -152,11 +152,14 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
152 152
153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); 153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
154 154
155 priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> 155 if (!priv->cfg->sku) {
156 /* not using sku overwrite */
157 priv->cfg->sku =
158 ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
156 EEPROM_SKU_CAP_BAND_POS); 159 EEPROM_SKU_CAP_BAND_POS);
157 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) 160 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
158 priv->cfg->sku |= IWL_SKU_N; 161 priv->cfg->sku |= IWL_SKU_N;
159 162 }
160 if (!priv->cfg->sku) { 163 if (!priv->cfg->sku) {
161 IWL_ERR(priv, "Invalid device sku\n"); 164 IWL_ERR(priv, "Invalid device sku\n");
162 return -EINVAL; 165 return -EINVAL;
@@ -168,7 +171,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
 		/* not using .cfg overwrite */
 		radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 		priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-		priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+		priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
 		if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
 			IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
 				priv->cfg->valid_tx_ant,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36335b1b54d4..c1cfd9952e52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 }
 
 /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69ebf2a94..5091d77e02ce 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
 	ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
 	if (!ndev) {
 		dev_err(dev, "no memory for network device instance\n");
+		ret = -ENOMEM;
 		goto out_priv;
 	}
 
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
 				      GFP_KERNEL);
 	if (!iwm->umac_profile) {
 		dev_err(dev, "Couldn't alloc memory for profile\n");
+		ret = -ENOMEM;
 		goto out_profile;
 	}
 
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a7dc7..f618b9623e5a 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
 	else
 		*burst_possible = false;
 
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
 
 	if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb72befc..be0ff78c1b16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
 
 	if (!fw || !fw->size || !fw->data) {
 		ERROR(rt2x00dev, "Failed to read Firmware.\n");
+		release_firmware(fw);
 		return -ENOENT;
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 73631c6fbb30..ace0b668c04e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -363,12 +363,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
 	if (pci_set_power_state(pci_dev, PCI_D0) ||
-	    pci_enable_device(pci_dev) ||
-	    pci_restore_state(pci_dev)) {
+	    pci_enable_device(pci_dev)) {
 		ERROR(rt2x00dev, "Failed to resume device.\n");
 		return -EIO;
 	}
 
+	pci_restore_state(pci_dev);
 	return rt2x00lib_resume(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0b4e8590cbb7..029be3c6c030 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2446,6 +2446,7 @@ static struct usb_device_id rt73usb_device_table[] = {
 	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Qcom */
 	{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index b8433f3a9bc2..62876cd5c41a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
 }
 
 static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 				u8 efuse_data, u8 offset, int *bcontinual,
-				u8 *write_state, struct pgpkt_struct target_pkt,
+				u8 *write_state, struct pgpkt_struct *target_pkt,
 				int *repeat_times, int *bresult, u8 word_en)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 	tmp_pkt.word_en = tmp_header & 0x0F;
 	tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
 
-	if (tmp_pkt.offset != target_pkt.offset) {
-		efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+	if (tmp_pkt.offset != target_pkt->offset) {
+		*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
 		*write_state = PG_STATE_HEADER;
 	} else {
 		for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 		}
 
 		if (bdataempty == false) {
-			efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
 			*write_state = PG_STATE_HEADER;
 		} else {
 			match_word_en = 0x0F;
-			if (!((target_pkt.word_en & BIT(0)) |
+			if (!((target_pkt->word_en & BIT(0)) |
 			    (tmp_pkt.word_en & BIT(0))))
 				match_word_en &= (~BIT(0));
 
-			if (!((target_pkt.word_en & BIT(1)) |
+			if (!((target_pkt->word_en & BIT(1)) |
 			    (tmp_pkt.word_en & BIT(1))))
 				match_word_en &= (~BIT(1));
 
-			if (!((target_pkt.word_en & BIT(2)) |
+			if (!((target_pkt->word_en & BIT(2)) |
 			    (tmp_pkt.word_en & BIT(2))))
 				match_word_en &= (~BIT(2));
 
-			if (!((target_pkt.word_en & BIT(3)) |
+			if (!((target_pkt->word_en & BIT(3)) |
 			    (tmp_pkt.word_en & BIT(3))))
 				match_word_en &= (~BIT(3));
 
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 			badworden = efuse_word_enable_data_write(
 					hw, *efuse_addr + 1,
 					tmp_pkt.word_en,
-					target_pkt.data);
+					target_pkt->data);
 
 			if (0x0F != (badworden & 0x0F)) {
 				u8 reorg_offset = offset;
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 			}
 
 			tmp_word_en = 0x0F;
-			if ((target_pkt.word_en & BIT(0)) ^
+			if ((target_pkt->word_en & BIT(0)) ^
 			    (match_word_en & BIT(0)))
 				tmp_word_en &= (~BIT(0));
 
-			if ((target_pkt.word_en & BIT(1)) ^
+			if ((target_pkt->word_en & BIT(1)) ^
 			    (match_word_en & BIT(1)))
 				tmp_word_en &= (~BIT(1));
 
-			if ((target_pkt.word_en & BIT(2)) ^
+			if ((target_pkt->word_en & BIT(2)) ^
 			    (match_word_en & BIT(2)))
 				tmp_word_en &= (~BIT(2));
 
-			if ((target_pkt.word_en & BIT(3)) ^
+			if ((target_pkt->word_en & BIT(3)) ^
 			    (match_word_en & BIT(3)))
 				tmp_word_en &= (~BIT(3));
 
 			if ((tmp_word_en & 0x0F) != 0x0F) {
 				*efuse_addr = efuse_get_current_size(hw);
-				target_pkt.offset = offset;
-				target_pkt.word_en = tmp_word_en;
+				target_pkt->offset = offset;
+				target_pkt->word_en = tmp_word_en;
 			} else
 				*bcontinual = false;
 			*write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
 		}
 	} else {
 		*efuse_addr += (2 * tmp_word_cnts) + 1;
-		target_pkt.offset = offset;
-		target_pkt.word_en = word_en;
+		target_pkt->offset = offset;
+		target_pkt->word_en = word_en;
 		*write_state = PG_STATE_HEADER;
 	}
 }
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
 			efuse_write_data_case1(hw, &efuse_addr,
 					       efuse_data, offset,
 					       &bcontinual,
-					       &write_state, target_pkt,
+					       &write_state, &target_pkt,
 					       &repeat_times, &bresult,
 					       word_en);
 		else
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0fa36aa6701a..1758d4463247 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -619,6 +619,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 			struct sk_buff *uskb = NULL;
 			u8 *pdata;
 			uskb = dev_alloc_skb(skb->len + 128);
+			if (!uskb) {
+				RT_TRACE(rtlpriv,
+					 (COMP_INTR | COMP_RECV),
+					 DBG_EMERG,
+					 ("can't alloc rx skb\n"));
+				goto done;
+			}
 			memcpy(IEEE80211_SKB_RXCB(uskb),
 			       &rx_status,
 			       sizeof(rx_status));
@@ -641,7 +648,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
 		if (unlikely(!new_skb)) {
 			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-				 DBG_DMESG,
+				 DBG_EMERG,
 				 ("can't alloc skb for rx\n"));
 			goto done;
 		}
@@ -1066,9 +1073,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
 			struct sk_buff *skb =
 			    dev_alloc_skb(rtlpci->rxbuffersize);
 			u32 bufferaddress;
-			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 			if (!skb)
 				return 0;
+			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 
 			/*skb->dev = dev; */
 
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4016fe..40372bac9482 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changed & BSS_CHANGED_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
+		if (!beacon)
+			goto out_sleep;
+
 		ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
 					      beacon->len);
 
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 46714910f98c..7145ea543783 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl)
 	spi_message_add_tail(&t, &m);
 
 	spi_sync(wl_to_spi(wl), &m);
-	kfree(cmd);
-
 	wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+	kfree(cmd);
 }
 
 static void wl1271_spi_init(struct wl1271 *wl)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS